author     Hilko Bengen <bengen@debian.org>  2014-06-07 12:02:12 +0200
committer  Hilko Bengen <bengen@debian.org>  2014-06-07 12:02:12 +0200
commit     d5ed89b946297270ec28abf44bef2371a06f1f4f (patch)
tree       ce2d945e4dde69af90bd9905a70d8d27f4936776 /src/test/java
download   elasticsearch-d5ed89b946297270ec28abf44bef2371a06f1f4f.tar.gz

Imported Upstream version 1.0.3 (upstream/1.0.3)
Diffstat (limited to 'src/test/java')
-rw-r--r--  src/test/java/com/carrotsearch/randomizedtesting/StandaloneRandomizedContext.java | 68
-rw-r--r--  src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java | 71
-rw-r--r--  src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java | 65
-rw-r--r--  src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java | 107
-rw-r--r--  src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java | 487
-rw-r--r--  src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java | 1693
-rw-r--r--  src/test/java/org/apache/lucene/util/AbstractRandomizedTest.java | 367
-rw-r--r--  src/test/java/org/apache/lucene/util/SloppyMathTests.java | 98
-rw-r--r--  src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java | 47
-rw-r--r--  src/test/java/org/elasticsearch/VersionTests.java | 75
-rw-r--r--  src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java | 134
-rw-r--r--  src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsTests.java | 147
-rw-r--r--  src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java | 37
-rw-r--r--  src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java | 70
-rw-r--r--  src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java | 101
-rw-r--r--  src/test/java/org/elasticsearch/action/bulk/bulk-log.json | 24
-rw-r--r--  src/test/java/org/elasticsearch/action/bulk/simple-bulk.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/action/bulk/simple-bulk2.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/action/bulk/simple-bulk3.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/action/bulk/simple-bulk4.json | 7
-rw-r--r--  src/test/java/org/elasticsearch/action/bulk/simple-bulk5.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java | 164
-rw-r--r--  src/test/java/org/elasticsearch/action/percolate/mpercolate1.json | 12
-rw-r--r--  src/test/java/org/elasticsearch/action/percolate/mpercolate2.json | 6
-rw-r--r--  src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java | 90
-rw-r--r--  src/test/java/org/elasticsearch/action/search/simple-msearch1.json | 10
-rw-r--r--  src/test/java/org/elasticsearch/action/search/simple-msearch2.json | 10
-rw-r--r--  src/test/java/org/elasticsearch/action/search/simple-msearch3.json | 8
-rw-r--r--  src/test/java/org/elasticsearch/action/suggest/SuggestActionTests.java | 55
-rw-r--r--  src/test/java/org/elasticsearch/action/termvector/AbstractTermVectorTests.java | 411
-rw-r--r--  src/test/java/org/elasticsearch/action/termvector/GetTermVectorCheckDocFreqTests.java | 256
-rw-r--r--  src/test/java/org/elasticsearch/action/termvector/GetTermVectorTests.java | 544
-rw-r--r--  src/test/java/org/elasticsearch/action/termvector/MultiTermVectorsTests.java | 73
-rw-r--r--  src/test/java/org/elasticsearch/action/termvector/TermVectorUnitTests.java | 328
-rw-r--r--  src/test/java/org/elasticsearch/action/termvector/multiRequest1.json | 13
-rw-r--r--  src/test/java/org/elasticsearch/action/termvector/multiRequest2.json | 26
-rw-r--r--  src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java | 834
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/aliases/AliasesBenchmark.java | 138
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/bloom/BloomBench.java | 63
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/checksum/ChecksumBenchmark.java | 85
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/cluster/ClusterAllocationRerouteBenchmark.java | 87
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/common/lucene/uidscan/LuceneUidScanBenchmark.java | 96
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/common/recycler/RecyclerBenchmark.java | 123
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/common/util/BytesRefComparisonsBenchmark.java | 140
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/counter/SimpleCounterBenchmark.java | 66
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/fielddata/LongFieldDataBenchmark.java | 160
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/fs/FsAppendBenchmark.java | 81
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/get/SimpleGetActionBenchmark.java | 55
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/hppc/StringMapAdjustOrPutBenchmark.java | 256
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/percolator/PercolatorStressBenchmark.java | 159
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/scripts/score/BasicScriptBenchmark.java | 337
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java | 101
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java | 136
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java | 83
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/scripts/score/plugin/NativeScriptExamplesPlugin.java | 45
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantForLoopScoreScript.java | 54
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantScoreScript.java | 49
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeNaiveTFIDFScoreScript.java | 74
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumNoRecordScoreScript.java | 72
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumScoreScript.java | 72
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/search/SuggestSearchBenchMark.java | 166
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/search/aggregations/HistogramAggregationSearchBenchmark.java | 311
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/search/aggregations/QueryFilterAggregationSearchBenchmark.java | 182
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchBenchmark.java | 367
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchAndIndexingBenchmark.java | 253
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchBenchmark.java | 434
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchShortCircuitBenchmark.java | 208
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/search/geo/GeoDistanceSearchBenchmark.java | 202
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/search/nested/NestedSearchBenchMark.java | 193
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/stress/NodesStressTest.java | 283
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/stress/SingleThreadBulkStress.java | 123
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/stress/SingleThreadIndexingStress.java | 109
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/time/SimpleTimeBenchmark.java | 70
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageRequest.java | 59
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageResponse.java | 72
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java | 150
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java | 191
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/transport/netty/NettyEchoBenchmark.java | 157
-rw-r--r--  src/test/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java | 65
-rw-r--r--  src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java | 173
-rw-r--r--  src/test/java/org/elasticsearch/broadcast/BroadcastActionsTests.java | 111
-rw-r--r--  src/test/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java | 142
-rw-r--r--  src/test/java/org/elasticsearch/client/transport/TransportClientTests.java | 39
-rw-r--r--  src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java | 57
-rw-r--r--  src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java | 216
-rw-r--r--  src/test/java/org/elasticsearch/cluster/ClusterHealthTests.java | 65
-rw-r--r--  src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java | 658
-rw-r--r--  src/test/java/org/elasticsearch/cluster/DiskUsageTests.java | 60
-rw-r--r--  src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java | 307
-rw-r--r--  src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java | 130
-rw-r--r--  src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java | 120
-rw-r--r--  src/test/java/org/elasticsearch/cluster/SimpleDataNodesTests.java | 76
-rw-r--r--  src/test/java/org/elasticsearch/cluster/SpecificMasterNodesTests.java | 138
-rw-r--r--  src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java | 62
-rw-r--r--  src/test/java/org/elasticsearch/cluster/ZenUnicastDiscoveryTests.java | 112
-rw-r--r--  src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java | 129
-rw-r--r--  src/test/java/org/elasticsearch/cluster/ack/AckTests.java | 422
-rw-r--r--  src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java | 228
-rw-r--r--  src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java | 215
-rw-r--r--  src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java | 147
-rw-r--r--  src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java | 68
-rw-r--r--  src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationTests.java | 80
-rw-r--r--  src/test/java/org/elasticsearch/cluster/metadata/MappingMetaDataParserTests.java | 346
-rw-r--r--  src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java | 485
-rw-r--r--  src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java | 193
-rw-r--r--  src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java | 119
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java | 435
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/AllocatePostApiFlagTests.java | 67
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java | 392
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java | 827
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java | 509
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java | 626
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java | 154
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java | 245
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/DisableAllocationTests.java | 146
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java | 113
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java | 163
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java | 485
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java | 168
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java | 538
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java | 340
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java | 123
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java | 102
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java | 143
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java | 93
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java | 214
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java | 150
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java | 105
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java | 415
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java | 36
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java | 93
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java | 101
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java | 188
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java | 412
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java | 169
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java | 178
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java | 173
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java | 177
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java | 597
-rw-r--r--  src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java | 148
-rw-r--r--  src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java | 88
-rw-r--r--  src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java | 67
-rw-r--r--  src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java | 109
-rw-r--r--  src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java | 87
-rw-r--r--  src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java | 126
-rw-r--r--  src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java | 361
-rw-r--r--  src/test/java/org/elasticsearch/codecs/CodecTests.java | 122
-rw-r--r--  src/test/java/org/elasticsearch/common/BooleansTests.java | 43
-rw-r--r--  src/test/java/org/elasticsearch/common/ParseFieldTests.java | 74
-rw-r--r--  src/test/java/org/elasticsearch/common/StringsTests.java | 36
-rw-r--r--  src/test/java/org/elasticsearch/common/TableTests.java | 153
-rw-r--r--  src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java | 106
-rw-r--r--  src/test/java/org/elasticsearch/common/collect/Iterators2Tests.java | 50
-rw-r--r--  src/test/java/org/elasticsearch/common/compress/CompressedStringTests.java | 55
-rw-r--r--  src/test/java/org/elasticsearch/common/geo/GeoHashTests.java | 59
-rw-r--r--  src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java | 256
-rw-r--r--  src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java | 196
-rw-r--r--  src/test/java/org/elasticsearch/common/hppc/HppcMapsTests.java | 101
-rw-r--r--  src/test/java/org/elasticsearch/common/io/StreamsTests.java | 91
-rw-r--r--  src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java | 90
-rw-r--r--  src/test/java/org/elasticsearch/common/io/streams/HandlesStreamsTests.java | 80
-rw-r--r--  src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java | 66
-rw-r--r--  src/test/java/org/elasticsearch/common/lucene/LuceneTest.java | 49
-rw-r--r--  src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java | 342
-rw-r--r--  src/test/java/org/elasticsearch/common/lucene/search/MatchAllDocsFilterTests.java | 69
-rw-r--r--  src/test/java/org/elasticsearch/common/lucene/search/MoreLikeThisQueryTests.java | 74
-rw-r--r--  src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java | 66
-rw-r--r--  src/test/java/org/elasticsearch/common/lucene/search/TermsFilterTests.java | 119
-rw-r--r--  src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterLuceneTests.java | 391
-rw-r--r--  src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterTests.java | 567
-rw-r--r--  src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java | 266
-rw-r--r--  src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java | 285
-rw-r--r--  src/test/java/org/elasticsearch/common/path/PathTrieTests.java | 160
-rw-r--r--  src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java | 92
-rw-r--r--  src/test/java/org/elasticsearch/common/recycler/ConcurrentRecyclerTests.java | 29
-rw-r--r--  src/test/java/org/elasticsearch/common/recycler/LockedRecyclerTests.java | 29
-rw-r--r--  src/test/java/org/elasticsearch/common/recycler/NoneRecyclerTests.java | 29
-rw-r--r--  src/test/java/org/elasticsearch/common/recycler/QueueRecyclerTests.java | 29
-rw-r--r--  src/test/java/org/elasticsearch/common/recycler/SoftConcurrentRecyclerTests.java | 29
-rw-r--r--  src/test/java/org/elasticsearch/common/recycler/SoftThreadLocalRecyclerTests.java | 29
-rw-r--r--  src/test/java/org/elasticsearch/common/recycler/ThreadLocalRecyclerTests.java | 29
-rw-r--r--  src/test/java/org/elasticsearch/common/regex/RegexTests.java | 71
-rw-r--r--  src/test/java/org/elasticsearch/common/rounding/RoundingTests.java | 44
-rw-r--r--  src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java | 94
-rw-r--r--  src/test/java/org/elasticsearch/common/settings/ImmutableSettingsTests.java | 178
-rw-r--r--  src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java | 24
-rw-r--r--  src/test/java/org/elasticsearch/common/settings/foo/FooTestClass.java | 24
-rw-r--r--  src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java | 52
-rw-r--r--  src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java | 52
-rw-r--r--  src/test/java/org/elasticsearch/common/settings/loader/test-settings.json | 10
-rw-r--r--  src/test/java/org/elasticsearch/common/settings/loader/test-settings.yml | 8
-rw-r--r--  src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java | 83
-rw-r--r--  src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java | 98
-rw-r--r--  src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java | 62
-rw-r--r--  src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java | 199
-rw-r--r--  src/test/java/org/elasticsearch/common/unit/TimeValueTests.java | 69
-rw-r--r--  src/test/java/org/elasticsearch/common/util/BigArraysTests.java | 195
-rw-r--r--  src/test/java/org/elasticsearch/common/util/ByteUtilsTests.java | 109
-rw-r--r--  src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java | 64
-rw-r--r--  src/test/java/org/elasticsearch/common/util/SlicedDoubleListTests.java | 120
-rw-r--r--  src/test/java/org/elasticsearch/common/util/SlicedLongListTests.java | 119
-rw-r--r--  src/test/java/org/elasticsearch/common/util/SlicedObjectListTests.java | 147
-rw-r--r--  src/test/java/org/elasticsearch/common/util/concurrent/CountDownTest.java | 105
-rw-r--r--  src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java | 236
-rw-r--r--  src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java | 277
-rw-r--r--  src/test/java/org/elasticsearch/common/xcontent/builder/BuilderRawFieldTests.java | 124
-rw-r--r--  src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java | 156
-rw-r--r--  src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java | 105
-rw-r--r--  src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java | 68
-rw-r--r--  src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java | 456
-rw-r--r--  src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelTests.java | 97
-rw-r--r--  src/test/java/org/elasticsearch/count/query/SimpleQueryTests.java | 827
-rw-r--r--  src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java | 149
-rw-r--r--  src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java | 142
-rw-r--r--  src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java | 83
-rw-r--r--  src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java | 255
-rw-r--r--  src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java | 262
-rw-r--r--  src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java | 147
-rw-r--r--  src/test/java/org/elasticsearch/discovery/DiscoveryTests.java | 55
-rw-r--r--  src/test/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPingTests.java | 157
-rw-r--r--  src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java | 115
-rw-r--r--  src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsTests.java | 45
-rw-r--r--  src/test/java/org/elasticsearch/document/BulkTests.java | 593
-rw-r--r--  src/test/java/org/elasticsearch/document/DocumentActionsTests.java | 291
-rw-r--r--  src/test/java/org/elasticsearch/explain/ExplainActionTests.java | 239
-rw-r--r--  src/test/java/org/elasticsearch/flt/FuzzyLikeThisActionTests.java | 94
-rw-r--r--  src/test/java/org/elasticsearch/gateway/fs/IndexGatewayTests.java | 384
-rw-r--r--  src/test/java/org/elasticsearch/gateway/local/LocalGatewayIndexStateTests.java | 535
-rw-r--r--  src/test/java/org/elasticsearch/gateway/local/QuorumLocalGatewayTests.java | 189
-rw-r--r--  src/test/java/org/elasticsearch/gateway/local/SimpleRecoveryLocalGatewayTests.java | 428
-rw-r--r--  src/test/java/org/elasticsearch/gateway/none/RecoverAfterNodesTests.java | 161
-rw-r--r--  src/test/java/org/elasticsearch/get/GetActionTests.java | 870
-rw-r--r--  src/test/java/org/elasticsearch/index/IndexRequestBuilderTests.java | 61
-rw-r--r--  src/test/java/org/elasticsearch/index/VersionTypeTests.java | 112
-rw-r--r--  src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java | 166
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java | 238
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java | 59
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java | 69
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java | 76
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java | 90
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java | 117
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java | 104
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java | 113
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java | 92
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java | 230
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java | 63
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java | 73
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java | 48
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java | 172
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java | 49
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java | 63
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java | 51
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java | 71
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java | 62
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java | 97
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java | 128
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/cjk_analysis.json | 37
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java | 216
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/commongrams/common_words.txt | 2
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams.json | 29
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json | 31
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java | 42
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/keep_analysis.json | 19
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/pattern_capture.json | 46
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/shingle_analysis.json | 16
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/stop.json | 18
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java | 105
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.json | 52
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.txt | 3
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt | 3
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/test1.json | 80
-rw-r--r--  src/test/java/org/elasticsearch/index/analysis/test1.yml | 61
-rw-r--r--  src/test/java/org/elasticsearch/index/cache/filter/FilterCacheTests.java | 95
-rw-r--r--  src/test/java/org/elasticsearch/index/cache/id/SimpleIdCacheTests.java | 410
-rw-r--r--  src/test/java/org/elasticsearch/index/codec/CodecTests.java | 430
-rw-r--r--  src/test/java/org/elasticsearch/index/codec/postingformat/DefaultPostingsFormatTests.java | 123
-rw-r--r--  src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java | 178
-rw-r--r--  src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java | 55
-rw-r--r--  src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java | 67
-rw-r--r--  src/test/java/org/elasticsearch/index/engine/internal/InternalEngineIntegrationTest.java | 199
-rw-r--r--  src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java | 1162
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTests.java | 358
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTests.java | 104
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/AbstractNumericFieldDataTests.java | 477
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTests.java | 426
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/DisabledFieldDataFormatTests.java | 93
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/DoubleFieldDataTests.java | 194
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java | 625
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/FSTPackedBytesStringFieldDataTests.java | 32
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/FieldDataFilterIntegrationTests.java | 88
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTest.java | 201
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/FloatFieldDataTests.java | 193
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java | 170
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/LongFieldDataTests.java | 416
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java | 82
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java | 32
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java | 289
-rw-r--r--  src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java | 80
-rw-r--r--  src/test/java/org/elasticsearch/index/gateway/CommitPointsTests.java | 77
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/MapperTestUtils.java | 83
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/UidTests.java | 46
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java | 283
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/all/mapping.json | 57
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json | 56
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/all/noboost-mapping.json | 55
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/all/store-mapping.json | 56
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/all/test1.json | 18
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/analyzer/AnalyzerMapperTests.java | 159
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java | 54
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/boost/BoostMappingTests.java | 96
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java | 68
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java | 177
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/camelcase/CamelCaseFieldNameTests.java | 58
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/completion/CompletionFieldMapperTests.java | 114
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/compound/CompoundTypesTests.java | 75
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationTests.java | 90
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java | 226
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java | 222
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java | 92
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java | 360
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java | 152
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/GenericStoreDynamicTemplateTests.java | 64
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json | 14
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/PathMatchDynamicTemplateTests.java | 77
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json | 15
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json | 30
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/SimpleDynamicTemplatesTests.java | 173
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json | 7
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json | 36
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/geo/GeoEncodingTests.java | 48
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/geo/GeoMappingTests.java | 78
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java | 249
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java | 120
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/geo/LatLonAndGeohashMappingGeoPointTests.java | 95
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/geo/LatLonMappingGeoPointTests.java | 411
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java | 118
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java | 114
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java | 52
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTest.java | 89
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTest.java | 121
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java | 142
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java | 408
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationTests.java | 309
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java | 199
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-data.json | 4
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json | 11
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json | 27
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json | 32
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json | 18
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json | 25
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json | 30
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json | 16
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/test-data.json | 8
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json | 30
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json | 30
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json | 32
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json | 55
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-fields.json | 50
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/nested/NestedMappingTests.java | 318
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java | 325
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/object/NullValueObjectMappingTests.java | 75
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java | 59
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java | 102
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/path/PathMapperTests.java | 73
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/path/test-mapping.json | 32
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java | 92
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java | 147
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/simple/test-mapping.json | 98
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/simple/test1-notype-noid.json | 40
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/simple/test1-notype.json | 41
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/simple/test1-withtype.json | 43
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/simple/test1.json | 41
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java | 120
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java | 98
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java | 202
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java | 322
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java | 156
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java | 140
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/typelevels/ParseDocumentTypeLevelsTests.java | 242
-rw-r--r--  src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java | 50
-rw-r--r--  src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java | 178
-rw-r--r--  src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java | 36
-rw-r--r--  src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterCachingTests.java | 199
-rw-r--r--  src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java | 2298
-rw-r--r--  src/test/java/org/elasticsearch/index/query/and-filter-cache.json | 21
-rw-r--r--  src/test/java/org/elasticsearch/index/query/and-filter-named.json | 26
-rw-r--r--  src/test/java/org/elasticsearch/index/query/and-filter.json | 25
-rw-r--r--  src/test/java/org/elasticsearch/index/query/and-filter2.json | 23
-rw-r--r--  src/test/java/org/elasticsearch/index/query/bool-filter.json | 35
-rw-r--r--  src/test/java/org/elasticsearch/index/query/bool.json | 30
-rw-r--r--  src/test/java/org/elasticsearch/index/query/boosting-query.json | 15
-rw-r--r--  src/test/java/org/elasticsearch/index/query/child-mapping.json | 12
-rw-r--r--  src/test/java/org/elasticsearch/index/query/commonTerms-query1.json | 11
-rw-r--r--  src/test/java/org/elasticsearch/index/query/commonTerms-query2.json | 11
-rw-r--r--  src/test/java/org/elasticsearch/index/query/commonTerms-query3.json | 9
-rw-r--r--  src/test/java/org/elasticsearch/index/query/constantScore-query.json | 9
-rw-r--r--  src/test/java/org/elasticsearch/index/query/custom-boost-factor-query.json | 10
-rw-r--r--  src/test/java/org/elasticsearch/index/query/custom_score1.json | 10
-rw-r--r--  src/test/java/org/elasticsearch/index/query/data.json | 45
-rw-r--r--  src/test/java/org/elasticsearch/index/query/date_range_in_boolean.json | 25
-rw-r--r--  src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached.json | 26
-rw-r--r--  src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now.json | 26
-rw-r--r--  src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now_with_rounding.json | 26
-rw-r--r--  src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now.json | 26
-rw-r--r--  src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now_with_rounding.json | 26
-rw-r--r--  src/test/java/org/elasticsearch/index/query/disMax.json | 18
-rw-r--r--  src/test/java/org/elasticsearch/index/query/disMax2.json | 14
-rw-r--r--  src/test/java/org/elasticsearch/index/query/faulty-function-score-query.json | 15
-rw-r--r--  src/test/java/org/elasticsearch/index/query/field3.json | 9
-rw-r--r--  src/test/java/org/elasticsearch/index/query/filtered-query.json | 14
-rw-r--r--  src/test/java/org/elasticsearch/index/query/filtered-query2.json | 14
-rw-r--r--  src/test/java/org/elasticsearch/index/query/filtered-query3.json | 19
-rw-r--r--  src/test/java/org/elasticsearch/index/query/filtered-query4.json | 17
-rw-r--r--  src/test/java/org/elasticsearch/index/query/fquery-filter.json | 19
-rw-r--r--  src/test/java/org/elasticsearch/index/query/function-filter-score-query.json | 30
-rw-r--r--  src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json | 10
-rw-r--r--  src/test/java/org/elasticsearch/index/query/fuzzy-with-fields2.json | 9
-rw-r--r--  src/test/java/org/elasticsearch/index/query/fuzzy.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/index/query/fuzzyLikeThis.json | 7
-rw-r--r--  src/test/java/org/elasticsearch/index/query/fuzzyLikeThisField.json | 8
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geoShape-filter.json | 21
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geoShape-query.json | 14
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_boundingbox-named.json | 16
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_boundingbox1.json | 15
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_boundingbox2.json | 21
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_boundingbox3.json | 15
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_boundingbox4.json | 15
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_boundingbox5.json | 15
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_boundingbox6.json | 17
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_distance-named.json | 17
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_distance1.json | 16
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_distance10.json | 17
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_distance11.json | 16
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_distance12.json | 17
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_distance2.json | 13
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_distance3.json | 13
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_distance4.json | 13
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_distance5.json | 17
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_distance6.json | 17
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_distance7.json | 16
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_distance8.json | 16
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_distance9.json | 17
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_polygon-named.json | 19
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_polygon1.json | 18
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_polygon2.json | 27
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_polygon3.json | 18
-rw-r--r--  src/test/java/org/elasticsearch/index/query/geo_polygon4.json | 18
-rw-r--r--  src/test/java/org/elasticsearch/index/query/guice/IndexQueryParserModuleTests.java | 107
-rw-r--r--  src/test/java/org/elasticsearch/index/query/guice/MyJsonFilterParser.java | 64
-rw-r--r--  src/test/java/org/elasticsearch/index/query/guice/MyJsonQueryParser.java | 64
-rw-r--r--  src/test/java/org/elasticsearch/index/query/has-child-in-and-filter-cached.json | 19
-rw-r--r--  src/test/java/org/elasticsearch/index/query/has-child.json | 13
-rw-r--r--  src/test/java/org/elasticsearch/index/query/limit-filter.json | 14
-rw-r--r--  src/test/java/org/elasticsearch/index/query/mapping.json | 15
-rw-r--r--  src/test/java/org/elasticsearch/index/query/match-query-bad-type.json | 8
-rw-r--r--  src/test/java/org/elasticsearch/index/query/matchAll.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/index/query/match_all_empty1.json | 3
-rw-r--r--  src/test/java/org/elasticsearch/index/query/match_all_empty2.json | 3
-rw-r--r--  src/test/java/org/elasticsearch/index/query/mlt.json | 8
-rw-r--r--  src/test/java/org/elasticsearch/index/query/mltField.json | 9
-rw-r--r--  src/test/java/org/elasticsearch/index/query/multiMatch-query-bad-type.json | 7
-rw-r--r--  src/test/java/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json | 6
-rw-r--r--  src/test/java/org/elasticsearch/index/query/multiMatch-query-simple.json | 6
-rw-r--r--  src/test/java/org/elasticsearch/index/query/not-filter.json | 18
-rw-r--r--  src/test/java/org/elasticsearch/index/query/not-filter2.json | 16
-rw-r--r--  src/test/java/org/elasticsearch/index/query/not-filter3.json | 16
-rw-r--r--  src/test/java/org/elasticsearch/index/query/numeric_range-filter.json | 19
-rw-r--r--  src/test/java/org/elasticsearch/index/query/or-filter.json | 25
-rw-r--r--  src/test/java/org/elasticsearch/index/query/or-filter2.json | 23
-rw-r--r--  src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java | 103
-rw-r--r--  src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java | 111
-rw-r--r--  src/test/java/org/elasticsearch/index/query/plugin/PluginJsonFilterParser.java | 64
-rw-r--r--  src/test/java/org/elasticsearch/index/query/plugin/PluginJsonQueryParser.java | 64
-rw-r--r--  src/test/java/org/elasticsearch/index/query/prefix-boost.json | 8
-rw-r--r--  src/test/java/org/elasticsearch/index/query/prefix-filter-named.json | 15
-rw-r--r--  src/test/java/org/elasticsearch/index/query/prefix-filter.json | 14
-rw-r--r--  src/test/java/org/elasticsearch/index/query/prefix-with-boost.json | 8
-rw-r--r--  src/test/java/org/elasticsearch/index/query/prefix.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/index/query/query-fields-match.json | 7
-rw-r--r--  src/test/java/org/elasticsearch/index/query/query-fields1.json | 7
-rw-r--r--  src/test/java/org/elasticsearch/index/query/query-fields2.json | 7
-rw-r--r--  src/test/java/org/elasticsearch/index/query/query-fields3.json | 7
-rw-r--r--  src/test/java/org/elasticsearch/index/query/query-filter.json | 16
-rw-r--r--  src/test/java/org/elasticsearch/index/query/query.json | 7
-rw-r--r--  src/test/java/org/elasticsearch/index/query/query2.json | 6
-rw-r--r--  src/test/java/org/elasticsearch/index/query/range-filter-named.json | 20
-rw-r--r--  src/test/java/org/elasticsearch/index/query/range-filter.json | 19
-rw-r--r--  src/test/java/org/elasticsearch/index/query/range.json | 10
-rw-r--r--  src/test/java/org/elasticsearch/index/query/range2.json | 8
-rw-r--r--  src/test/java/org/elasticsearch/index/query/regexp-boost.json | 8
-rw-r--r--  src/test/java/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json | 20
-rw-r--r--  src/test/java/org/elasticsearch/index/query/regexp-filter-flags.json | 18
-rw-r--r--  src/test/java/org/elasticsearch/index/query/regexp-filter-named.json | 15
-rw-r--r--  src/test/java/org/elasticsearch/index/query/regexp-filter.json | 14
-rw-r--r--  src/test/java/org/elasticsearch/index/query/regexp.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/index/query/simple-query-string.json | 8
-rw-r--r--  src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json | 13
-rw-r--r--  src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json | 12
-rw-r--r--  src/test/java/org/elasticsearch/index/query/span-multi-term-prefix.json | 7
-rw-r--r--  src/test/java/org/elasticsearch/index/query/span-multi-term-range-numeric.json | 16
-rw-r--r--  src/test/java/org/elasticsearch/index/query/span-multi-term-range-term.json | 16
-rw-r--r--  src/test/java/org/elasticsearch/index/query/span-multi-term-wildcard.json | 7
-rw-r--r--  src/test/java/org/elasticsearch/index/query/spanFieldMaskingTerm.json | 29
-rw-r--r--  src/test/java/org/elasticsearch/index/query/spanFirst.json | 10
-rw-r--r--  src/test/java/org/elasticsearch/index/query/spanNear.json | 24
-rw-r--r--  src/test/java/org/elasticsearch/index/query/spanNot.json | 14
-rw-r--r--  src/test/java/org/elasticsearch/index/query/spanOr.json | 21
-rw-r--r--  src/test/java/org/elasticsearch/index/query/spanOr2.json | 30
-rw-r--r--  src/test/java/org/elasticsearch/index/query/spanTerm.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/index/query/starColonStar.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/index/query/term-filter-named.json | 15
-rw-r--r--  src/test/java/org/elasticsearch/index/query/term-filter.json | 14
-rw-r--r--  src/test/java/org/elasticsearch/index/query/term-with-boost.json | 8
-rw-r--r--  src/test/java/org/elasticsearch/index/query/term.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/index/query/terms-filter-named.json | 15
-rw-r--r--  src/test/java/org/elasticsearch/index/query/terms-filter.json | 14
-rw-r--r--  src/test/java/org/elasticsearch/index/query/terms-query.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/index/query/wildcard-boost.json | 8
-rw-r--r--  src/test/java/org/elasticsearch/index/query/wildcard.json | 5
-rw-r--r--  src/test/java/org/elasticsearch/index/search/FieldDataTermsFilterTests.java | 253
-rw-r--r--  src/test/java/org/elasticsearch/index/search/child/BitSetCollector.java | 50
-rw-r--r--  src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java | 347
-rw-r--r--  src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java | 256
-rw-r--r--  src/test/java/org/elasticsearch/index/search/child/MockScorer.java | 85
-rw-r--r--  src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java | 245
-rw-r--r--  src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java | 241
-rw-r--r--  src/test/java/org/elasticsearch/index/search/child/TestSearchContext.java | 583
-rw-r--r--  src/test/java/org/elasticsearch/index/search/geo/GeoDistanceTests.java | 68
-rw-r--r--  src/test/java/org/elasticsearch/index/search/geo/GeoHashUtilsTests.java | 89
-rw-r--r--  src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java | 258
-rw-r--r--  src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java | 347
-rw-r--r--  src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java | 81
-rw-r--r--  src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java | 81
-rw-r--r--  src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java | 50
-rw-r--r--  src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java | 281
-rw-r--r--  src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java | 177
-rw-r--r--  src/test/java/org/elasticsearch/index/store/distributor/DistributorTests.java | 210
-rw-r--r--  src/test/java/org/elasticsearch/index/store/memory/SimpleByteBufferStoreTests.java | 179
-rw-r--r--  src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java | 267
-rw-r--r--  src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java | 55
-rw-r--r--  src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java | 46
-rw-r--r--  src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java | 46
-rw-r--r--  src/test/java/org/elasticsearch/indexing/IndexActionTests.java | 130
-rw-r--r--  src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionTests.java | 404
-rw-r--r--  src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerTests.java | 157
-rw-r--r--  src/test/java/org/elasticsearch/indices/IndicesOptionsTests.java | 919
-rw-r--r--  src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisBinderProcessor.java | 47
-rw-r--r--  src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisPlugin.java | 55
-rw-r--r--  src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java | 37
-rw-r--r--  src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzerProvider.java | 41
-rw-r--r--  src/test/java/org/elasticsearch/indices/analysis/DummyCharFilterFactory.java | 36
-rw-r--r--  src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysis.java | 43
-rw-r--r--  src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysisModule.java | 30
-rw-r--r--  src/test/java/org/elasticsearch/indices/analysis/DummyTokenFilterFactory.java | 33
-rw-r--r--  src/test/java/org/elasticsearch/indices/analysis/DummyTokenizerFactory.java | 37
-rw-r--r--  src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationTests.java | 207
-rw-r--r--  src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java | 142
-rw-r--r--  src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java | 93
-rw-r--r--  src/test/java/org/elasticsearch/indices/cache/CacheTests.java | 177
-rw-r--r--  src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java | 72
-rw-r--r--  src/test/java/org/elasticsearch/indices/fielddata/breaker/CircuitBreakerServiceTests.java | 157
-rw-r--r--  src/test/java/org/elasticsearch/indices/fielddata/breaker/DummyCircuitBreakerService.java | 47
-rw-r--r--  src/test/java/org/elasticsearch/indices/fielddata/breaker/RandomExceptionCircuitBreakerTests.java | 259
-rw-r--r--  src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java | 132
-rw-r--r--  src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java | 92
-rw-r--r--  src/test/java/org/elasticsearch/indices/mapping/DedicatedMasterGetFieldMappingTests.java | 45
-rw-r--r--  src/test/java/org/elasticsearch/indices/mapping/SimpleDeleteMappingTests.java | 106
-rw-r--r--  src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java | 157
-rw-r--r--  src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java | 143
-rw-r--r--  src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java | 477
-rw-r--r--  src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java | 254
-rw-r--r--  src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java | 115
-rw-r--r--  src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java | 105
-rw-r--r--  src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java | 291
-rw-r--r--  src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java | 161
-rw-r--r--  src/test/java/org/elasticsearch/indices/stats/SimpleIndexStatsTests.java | 425
-rw-r--r--  src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java | 115
-rw-r--r--  src/test/java/org/elasticsearch/indices/store/SimpleDistributorTests.java | 152
-rw-r--r--  src/test/java/org/elasticsearch/indices/store/StrictDistributor.java | 55
-rw-r--r--  src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java | 89
-rw-r--r--  src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java | 325
-rw-r--r--  src/test/java/org/elasticsearch/indices/template/template0.json | 7
-rw-r--r--  src/test/java/org/elasticsearch/indices/template/template1.json | 7
-rw-r--r--  src/test/java/org/elasticsearch/indices/template/template2.json | 9
-rw-r--r--  src/test/java/org/elasticsearch/indices/template/template3.json | 9
-rw-r--r--  src/test/java/org/elasticsearch/indices/template/template4.json | 9
-rw-r--r--  src/test/java/org/elasticsearch/indices/template/template5.json | 11
-rw-r--r--  src/test/java/org/elasticsearch/indices/warmer/LocalGatewayIndicesWarmerTests.java | 159
-rw-r--r--  src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java | 352
-rw-r--r--  src/test/java/org/elasticsearch/mget/SimpleMgetTests.java | 164
-rw-r--r--  src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java | 254
-rw-r--r--  src/test/java/org/elasticsearch/nested/SimpleNestedTests.java | 1229
-rw-r--r--  src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java | 55
-rw-r--r--  src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoTests.java | 271
-rw-r--r--  src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java | 40
-rw-r--r--  src/test/java/org/elasticsearch/nodesinfo/plugin/dummy2/TestNoVersionPlugin.java | 40
-rw-r--r--  src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java | 179
-rw-r--r--  src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorTests.java | 399
-rw-r--r--  src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java | 264
-rw-r--r--  src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java | 125
-rw-r--r--  src/test/java/org/elasticsearch/percolator/PercolatorTests.java | 1766
-rw-r--r--  src/test/java/org/elasticsearch/percolator/RecoveryPercolatorTests.java | 443
-rw-r--r--  src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java | 214
-rw-r--r--  src/test/java/org/elasticsearch/plugin/PluginManagerTests.java | 318
-rw-r--r--  src/test/java/org/elasticsearch/plugin/ResponseHeaderPluginTests.java | 71
-rw-r--r--  src/test/java/org/elasticsearch/plugin/SitePluginTests.java | 104
-rw-r--r--  src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderPlugin.java | 40
-rw-r--r--  src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderRestAction.java | 46
-rw-r--r--  src/test/java/org/elasticsearch/recovery/FullRollingRestartTests.java | 124
-rw-r--r--  src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadTests.java | 460
-rw-r--r--  src/test/java/org/elasticsearch/recovery/RelocationTests.java | 419
-rw-r--r--  src/test/java/org/elasticsearch/recovery/SimpleRecoveryTests.java | 119
-rw-r--r--  src/test/java/org/elasticsearch/recovery/SmallFileChunkSizeRecoveryTests.java | 34
-rw-r--r--  src/test/java/org/elasticsearch/recovery/SmallTranslogOpsRecoveryTests.java | 34
-rw-r--r--  src/test/java/org/elasticsearch/recovery/SmallTranslogSizeRecoveryTests.java | 34
-rw-r--r--  src/test/java/org/elasticsearch/rest/helper/HttpClient.java | 120
-rw-r--r--  src/test/java/org/elasticsearch/rest/helper/HttpClientResponse.java | 63
-rw-r--r--  src/test/java/org/elasticsearch/rest/util/RestUtilsTests.java | 125
-rw-r--r--  src/test/java/org/elasticsearch/river/RiverTests.java | 158
-rw-r--r--  src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java | 145
-rw-r--r--  src/test/java/org/elasticsearch/routing/AliasRoutingTests.java | 434
-rw-r--r--  src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java | 440
-rw-r--r--  src/test/java/org/elasticsearch/script/IndexLookupTests.java | 625
-rw-r--r--  src/test/java/org/elasticsearch/script/NativeScriptTests.java | 66
-rw-r--r--  src/test/java/org/elasticsearch/script/ScriptFieldTests.java | 152
-rw-r--r--  src/test/java/org/elasticsearch/search/StressSearchServiceReaperTest.java | 67
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/CombiTests.java | 146
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/ParsingTests.java | 49
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/RandomTests.java | 276
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/BytesRefHashTests.java | 254
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java | 1077
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java | 1058
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java | 880
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/FilterTests.java | 177
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceTests.java | 439
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java | 244
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java | 132
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java | 790
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/IPv4RangeTests.java | 865
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/LongHashTests.java | 69
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsTests.java | 848
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountTests.java | 381
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/MissingTests.java | 222
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingTests.java | 183
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java | 345
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java | 947
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceTests.java | 324
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java | 361
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsTests.java | 1024
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/bucket/package-info.java | 26
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java | 108
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java | 269
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java | 398
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java | 269
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java | 283
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java | 370
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java | 270
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java | 127
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/metrics/package-info.java | 26
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/package-info.java | 26
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/support/FieldDataSourceTests.java | 141
-rw-r--r--  src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java | 162
-rw-r--r--  src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexTests.java | 112
-rw-r--r--  src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java | 123
-rw-r--r--  src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java | 309
-rw-r--r--  src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresTests.java | 128
-rw-r--r--  src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchTests.java | 439
-rw-r--r--  src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java | 2246
-rw-r--r--  src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java | 95
-rw-r--r--  src/test/java/org/elasticsearch/search/customscore/CustomScoreSearchTests.java | 1048
-rw-r--r--  src/test/java/org/elasticsearch/search/facet/ConcurrentDuel.java | 90
-rw-r--r--  src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTests.java | 355
-rw-r--r--  src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTestsMultiShardMultiNodeTests.java | 35
-rw-r--r--  src/test/java/org/elasticsearch/search/facet/SimpleFacetsTests.java | 2378
-rw-r--r--  src/test/java/org/elasticsearch/search/facet/terms/ShardSizeTermsFacetTests.java | 423
-rw-r--r--  src/test/java/org/elasticsearch/search/facet/terms/UnmappedFieldsTermsFacetsTests.java | 387
-rw-r--r--  src/test/java/org/elasticsearch/search/facet/termsstats/ShardSizeTermsStatsFacetTests.java | 548
-rw-r--r--  src/test/java/org/elasticsearch/search/fields/SearchFieldsTests.java | 486
-rw-r--r--  src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java | 715
-rw-r--r--  src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java | 169
-rw-r--r--  src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionTests.java | 154
-rw-r--r--  src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxTests.java | 280
-rw-r--r--  src/test/java/org/elasticsearch/search/geo/GeoDistanceFacetTests.java | 261
-rw-r--r--  src/test/java/org/elasticsearch/search/geo/GeoDistanceTests.java | 659
-rw-r--r--  src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java | 603
-rw-r--r--  src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java | 369
-rw-r--r--  src/test/java/org/elasticsearch/search/geo/gzippedmap.json | Bin 0 -> 7740 bytes
-rw-r--r--src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java53
-rw-r--r--src/test/java/org/elasticsearch/search/highlight/CustomHighlighterPlugin.java39
-rw-r--r--src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchTests.java105
-rw-r--r--src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java2703
-rw-r--r--src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchTests.java120
-rw-r--r--src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java246
-rw-r--r--src/test/java/org/elasticsearch/search/msearch/SimpleMultiSearchTests.java57
-rw-r--r--src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java108
-rw-r--r--src/test/java/org/elasticsearch/search/query/MultiMatchQueryTests.java375
-rw-r--r--src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java2179
-rw-r--r--src/test/java/org/elasticsearch/search/rescore/QueryRescorerTests.java464
-rw-r--r--src/test/java/org/elasticsearch/search/scan/SearchScanScrollingTests.java105
-rw-r--r--src/test/java/org/elasticsearch/search/scan/SearchScanTests.java88
-rw-r--r--src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java158
-rw-r--r--src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java453
-rw-r--r--src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java198
-rw-r--r--src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java1533
-rw-r--r--src/test/java/org/elasticsearch/search/source/SourceFetchingTests.java85
-rw-r--r--src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java158
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java1111
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java191
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java84
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java42
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchTests.java85
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java977
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java337
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java329
-rw-r--r--src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java403
-rw-r--r--src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java60
-rw-r--r--src/test/java/org/elasticsearch/similarity/SimilarityTests.java79
-rw-r--r--src/test/java/org/elasticsearch/snapshots/AbstractSnapshotTests.java120
-rw-r--r--src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java205
-rw-r--r--src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java154
-rw-r--r--src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java983
-rw-r--r--src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java56
-rw-r--r--src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java55
-rw-r--r--src/test/java/org/elasticsearch/snapshots/mockstore/ImmutableBlobContainerWrapper.java93
-rw-r--r--src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java316
-rw-r--r--src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryModule.java42
-rw-r--r--src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java87
-rw-r--r--src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java242
-rw-r--r--src/test/java/org/elasticsearch/stresstest/gcbehavior/FilterCacheGcStress.java73
-rw-r--r--src/test/java/org/elasticsearch/stresstest/get/GetStressTest.java97
-rw-r--r--src/test/java/org/elasticsearch/stresstest/get/MGetStress1.java106
-rw-r--r--src/test/java/org/elasticsearch/stresstest/indexing/BulkIndexingStressTest.java71
-rw-r--r--src/test/java/org/elasticsearch/stresstest/indexing/ConcurrentIndexingVersioningStressTest.java121
-rw-r--r--src/test/java/org/elasticsearch/stresstest/leaks/GenericStatsLeak.java51
-rw-r--r--src/test/java/org/elasticsearch/stresstest/leaks/JvmStatsLeak.java35
-rw-r--r--src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java77
-rw-r--r--src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java102
-rw-r--r--src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java125
-rw-r--r--src/test/java/org/elasticsearch/stresstest/refresh/RefreshStressTest1.java97
-rw-r--r--src/test/java/org/elasticsearch/stresstest/rollingrestart/QuickRollingRestartStressTest.java125
-rw-r--r--src/test/java/org/elasticsearch/stresstest/rollingrestart/RollingRestartStressTest.java367
-rw-r--r--src/test/java/org/elasticsearch/stresstest/search1/ConcurrentSearchSerializationTests.java115
-rw-r--r--src/test/java/org/elasticsearch/stresstest/search1/ParentChildStressTest.java237
-rw-r--r--src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java427
-rw-r--r--src/test/java/org/elasticsearch/test/ElasticsearchAllocationTestCase.java115
-rw-r--r--src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java878
-rw-r--r--src/test/java/org/elasticsearch/test/ElasticsearchLuceneTestCase.java51
-rw-r--r--src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java277
-rw-r--r--src/test/java/org/elasticsearch/test/ElasticsearchThreadFilter.java32
-rw-r--r--src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java48
-rw-r--r--src/test/java/org/elasticsearch/test/NodeSettingsSource.java76
-rw-r--r--src/test/java/org/elasticsearch/test/TestCluster.java1300
-rw-r--r--src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecyclerModule.java33
-rw-r--r--src/test/java/org/elasticsearch/test/client/RandomizingClient.java431
-rw-r--r--src/test/java/org/elasticsearch/test/engine/MockEngineModule.java31
-rw-r--r--src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java222
-rw-r--r--src/test/java/org/elasticsearch/test/engine/ThrowingAtomicReaderWrapper.java192
-rw-r--r--src/test/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java33
-rw-r--r--src/test/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java59
-rw-r--r--src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java449
-rw-r--r--src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java204
-rw-r--r--src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java118
-rw-r--r--src/test/java/org/elasticsearch/test/hamcrest/RegexMatcher.java62
-rw-r--r--src/test/java/org/elasticsearch/test/junit/annotations/Network.java34
-rw-r--r--src/test/java/org/elasticsearch/test/junit/annotations/TestLogging.java41
-rw-r--r--src/test/java/org/elasticsearch/test/junit/listeners/LoggingListener.java106
-rw-r--r--src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java135
-rw-r--r--src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTests.java37
-rw-r--r--src/test/java/org/elasticsearch/test/rest/RestTestExecutionContext.java153
-rw-r--r--src/test/java/org/elasticsearch/test/rest/Stash.java117
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/RestClient.java243
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/RestException.java41
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/RestResponse.java88
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java40
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java40
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java186
-rw-r--r--src/test/java/org/elasticsearch/test/rest/client/http/HttpResponse.java97
-rw-r--r--src/test/java/org/elasticsearch/test/rest/json/JsonPath.java111
-rw-r--r--src/test/java/org/elasticsearch/test/rest/junit/DescriptionHelper.java79
-rw-r--r--src/test/java/org/elasticsearch/test/rest/junit/RestReproduceInfoPrinter.java107
-rw-r--r--src/test/java/org/elasticsearch/test/rest/junit/RestTestCandidate.java83
-rw-r--r--src/test/java/org/elasticsearch/test/rest/junit/RestTestSuiteRunner.java598
-rw-r--r--src/test/java/org/elasticsearch/test/rest/junit/RunAfter.java56
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/DoSectionParser.java89
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java39
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/IsFalseParser.java34
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/IsTrueParser.java34
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/LengthParser.java48
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/LessThanParser.java39
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/MatchParser.java36
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java33
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/RestTestParseException.java33
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java66
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java165
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java115
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/SetSectionParser.java57
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java66
-rw-r--r--src/test/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java83
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java71
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/Assertion.java76
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/DoSection.java136
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/ExecutableSection.java34
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java54
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java61
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java55
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/LengthAssertion.java64
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/LessThanAssertion.java54
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/MatchAssertion.java76
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/RestTestSuite.java78
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/SetSection.java52
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/SetupSection.java60
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/SkipSection.java83
-rw-r--r--src/test/java/org/elasticsearch/test/rest/section/TestSection.java74
-rw-r--r--src/test/java/org/elasticsearch/test/rest/spec/RestApi.java216
-rw-r--r--src/test/java/org/elasticsearch/test/rest/spec/RestApiParser.java139
-rw-r--r--src/test/java/org/elasticsearch/test/rest/spec/RestSpec.java66
-rw-r--r--src/test/java/org/elasticsearch/test/rest/support/Features.java60
-rw-r--r--src/test/java/org/elasticsearch/test/rest/support/FileUtils.java143
-rw-r--r--src/test/java/org/elasticsearch/test/rest/support/VersionUtils.java87
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/AbstractParserTests.java41
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java174
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java393
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java116
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java150
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java97
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java197
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java536
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java77
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java125
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java117
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java273
-rw-r--r--src/test/java/org/elasticsearch/test/rest/test/VersionUtilsTests.java120
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java166
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java56
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java43
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockFSIndexStoreModule.java32
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockRamDirectoryService.java63
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockRamIndexStore.java61
-rw-r--r--src/test/java/org/elasticsearch/test/store/MockRamIndexStoreModule.java32
-rw-r--r--src/test/java/org/elasticsearch/test/transport/AssertingLocalTransport.java60
-rw-r--r--src/test/java/org/elasticsearch/test/transport/AssertingLocalTransportModule.java42
-rw-r--r--src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolTests.java130
-rw-r--r--src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java218
-rw-r--r--src/test/java/org/elasticsearch/timestamp/SimpleTimestampTests.java85
-rw-r--r--src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java875
-rw-r--r--src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java33
-rw-r--r--src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java141
-rw-r--r--src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java53
-rw-r--r--src/test/java/org/elasticsearch/tribe/TribeTests.java185
-rw-r--r--src/test/java/org/elasticsearch/ttl/SimpleTTLTests.java182
-rw-r--r--src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java103
-rw-r--r--src/test/java/org/elasticsearch/update/UpdateTests.java503
-rw-r--r--src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java271
-rw-r--r--src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationTests.java80
-rw-r--r--src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java259
-rw-r--r--src/test/java/org/elasticsearch/watcher/FileWatcherTest.java386
857 files changed, 145126 insertions, 0 deletions
diff --git a/src/test/java/com/carrotsearch/randomizedtesting/StandaloneRandomizedContext.java b/src/test/java/com/carrotsearch/randomizedtesting/StandaloneRandomizedContext.java
new file mode 100644
index 0000000..bf890e8
--- /dev/null
+++ b/src/test/java/com/carrotsearch/randomizedtesting/StandaloneRandomizedContext.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package com.carrotsearch.randomizedtesting;
+
+/**
+ * Exposes methods that make it possible to use a {@link RandomizedContext} without a {@link RandomizedRunner}.
+ * This was specifically needed by the REST tests, since they run with a custom JUnit runner ({@link org.elasticsearch.test.rest.junit.RestTestSuiteRunner}).
+ */
+public final class StandaloneRandomizedContext {
+
+ private StandaloneRandomizedContext() {
+
+ }
+
+ /**
+ * Creates a new {@link RandomizedContext} associated with the current thread.
+ */
+ public static void createRandomizedContext(Class<?> testClass, Randomness runnerRandomness) {
+ //the randomized runner is passed in as null, which is fine as long as we don't try to access it afterwards
+ RandomizedContext randomizedContext = RandomizedContext.create(Thread.currentThread().getThreadGroup(), testClass, null);
+ randomizedContext.push(runnerRandomness.clone(Thread.currentThread()));
+ }
+
+ /**
+ * Destroys the {@link RandomizedContext} associated with the current thread.
+ */
+ public static void disposeRandomizedContext() {
+ RandomizedContext.current().dispose();
+ }
+
+ /**
+ * Pushes the given {@link Randomness} onto the stack held by the current thread's randomized context
+ */
+ public static void pushRandomness(Randomness randomness) {
+ RandomizedContext.current().push(randomness);
+ }
+
+ /**
+ * Pops the most recently pushed {@link Randomness} from the current thread's randomized context and destroys it
+ */
+ public static void popAndDestroy() {
+ RandomizedContext.current().popAndDestroy();
+ }
+
+ /**
+ * Returns the string-formatted seed associated with the current thread's randomized context.
+ */
+ public static String getSeedAsString() {
+ return SeedUtils.formatSeed(RandomizedContext.current().getRandomness().getSeed());
+ }
+
+ /**
+ * Utility method that extracts the seed from a {@link Randomness} instance.
+ */
+ public static long getSeed(Randomness randomness) {
+ return randomness.getSeed();
+ }
+}
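
The class above is meant to be driven by a custom runner that owns the suite's Randomness. A minimal sketch of the intended call sequence, using only the methods shown (the runSuite driver and its logging are illustrative, not part of the class):

    // Hypothetical driver: runnerRandomness is assumed to come from the custom runner.
    void runSuite(Class<?> suiteClass, Randomness runnerRandomness) throws Exception {
        StandaloneRandomizedContext.createRandomizedContext(suiteClass, runnerRandomness);
        try {
            // log the seed so that failures can be reproduced
            System.out.println("seed [" + StandaloneRandomizedContext.getSeedAsString() + "]");
            // push a per-test Randomness, run the test, then pop and destroy it
            StandaloneRandomizedContext.pushRandomness(runnerRandomness.clone(Thread.currentThread()));
            try {
                // ... execute a single test here ...
            } finally {
                StandaloneRandomizedContext.popAndDestroy();
            }
        } finally {
            StandaloneRandomizedContext.disposeRandomizedContext();
        }
    }
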
diff --git a/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java b/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java
new file mode 100644
index 0000000..893020f
--- /dev/null
+++ b/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.analysis.miscellaneous;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.Reader;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class TruncateTokenFilterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleTest() throws IOException {
+ Analyzer analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName,
+ Reader reader) {
+ Tokenizer t = new WhitespaceTokenizer(Lucene.VERSION, reader);
+ return new TokenStreamComponents(t, new TruncateTokenFilter(t, 3));
+ }
+ };
+
+ TokenStream test = analyzer.tokenStream("test", "a bb ccc dddd eeeee");
+ test.reset();
+ CharTermAttribute termAttribute = test.addAttribute(CharTermAttribute.class);
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("a"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("bb"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("ccc"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("ddd"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("eee"));
+
+ assertThat(test.incrementToken(), equalTo(false));
+ }
+}
diff --git a/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java b/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java
new file mode 100644
index 0000000..e1d49f3
--- /dev/null
+++ b/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.analysis.miscellaneous;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.Reader;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class UniqueTokenFilterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleTest() throws IOException {
+ Analyzer analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName,
+ Reader reader) {
+ Tokenizer t = new WhitespaceTokenizer(Lucene.VERSION, reader);
+ return new TokenStreamComponents(t, new UniqueTokenFilter(t));
+ }
+ };
+
+ TokenStream test = analyzer.tokenStream("test", "this test with test");
+ test.reset();
+ CharTermAttribute termAttribute = test.addAttribute(CharTermAttribute.class);
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("this"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("test"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("with"));
+
+ assertThat(test.incrementToken(), equalTo(false));
+ }
+}
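
Both token filter tests above consume the stream by hand with incrementToken(). The Lucene test framework (already on this test classpath) offers a one-call equivalent; a minimal sketch of the unique-filter assertions rewritten that way, assuming BaseTokenStreamTestCase is importable — note that the helper calls reset() itself:

    import org.apache.lucene.analysis.BaseTokenStreamTestCase;
    import org.apache.lucene.analysis.TokenStream;

    // Equivalent to the manual loop in UniqueTokenFilterTests.simpleTest: the helper
    // resets the stream, compares each emitted term, and checks that the stream is exhausted.
    TokenStream stream = analyzer.tokenStream("test", "this test with test");
    BaseTokenStreamTestCase.assertTokenStreamContents(stream, new String[]{"this", "test", "with"});
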
diff --git a/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java b/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java
new file mode 100644
index 0000000..1bdf4e1
--- /dev/null
+++ b/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.lucene.search.postingshighlight;
+
+import org.apache.lucene.search.highlight.DefaultEncoder;
+import org.apache.lucene.search.highlight.SimpleHTMLEncoder;
+import org.apache.lucene.util.BytesRef;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+
+public class CustomPassageFormatterTests {
+
+ @Test
+ public void testSimpleFormat() {
+ String content = "This is a really cool highlighter. Postings highlighter gives nice snippets back. No matches here.";
+
+ CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<em>", "</em>", new DefaultEncoder());
+
+ Passage[] passages = new Passage[3];
+ String match = "highlighter";
+ BytesRef matchBytesRef = new BytesRef(match);
+
+ Passage passage1 = new Passage();
+ int start = content.indexOf(match);
+ int end = start + match.length();
+ passage1.startOffset = 0;
+ passage1.endOffset = end + 2; //let's include the trailing period and whitespace to make sure the whitespace gets trimmed
+ passage1.addMatch(start, end, matchBytesRef);
+ passages[0] = passage1;
+
+ Passage passage2 = new Passage();
+ start = content.lastIndexOf(match);
+ end = start + match.length();
+ passage2.startOffset = passage1.endOffset;
+ passage2.endOffset = end + 26; //extend to the end of the second sentence (" gives nice snippets back.")
+ passage2.addMatch(start, end, matchBytesRef);
+ passages[1] = passage2;
+
+ Passage passage3 = new Passage();
+ passage3.startOffset = passage2.endOffset;
+ passage3.endOffset = content.length();
+ passages[2] = passage3;
+
+ Snippet[] fragments = passageFormatter.format(passages, content);
+ assertThat(fragments, notNullValue());
+ assertThat(fragments.length, equalTo(3));
+ assertThat(fragments[0].getText(), equalTo("This is a really cool <em>highlighter</em>."));
+ assertThat(fragments[0].isHighlighted(), equalTo(true));
+ assertThat(fragments[1].getText(), equalTo("Postings <em>highlighter</em> gives nice snippets back."));
+ assertThat(fragments[1].isHighlighted(), equalTo(true));
+ assertThat(fragments[2].getText(), equalTo("No matches here."));
+ assertThat(fragments[2].isHighlighted(), equalTo(false));
+ }
+
+ @Test
+ public void testHtmlEncodeFormat() {
+ String content = "<b>This is a really cool highlighter.</b> Postings highlighter gives nice snippets back.";
+
+ CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<em>", "</em>", new SimpleHTMLEncoder());
+
+ Passage[] passages = new Passage[2];
+ String match = "highlighter";
+ BytesRef matchBytesRef = new BytesRef(match);
+
+ Passage passage1 = new Passage();
+ int start = content.indexOf(match);
+ int end = start + match.length();
+ passage1.startOffset = 0;
+ passage1.endOffset = end + 6; //let's include the closing tag and trailing whitespace to make sure the whitespace gets trimmed
+ passage1.addMatch(start, end, matchBytesRef);
+ passages[0] = passage1;
+
+ Passage passage2 = new Passage();
+ start = content.lastIndexOf(match);
+ end = start + match.length();
+ passage2.startOffset = passage1.endOffset;
+ passage2.endOffset = content.length();
+ passage2.addMatch(start, end, matchBytesRef);
+ passages[1] = passage2;
+
+ Snippet[] fragments = passageFormatter.format(passages, content);
+ assertThat(fragments, notNullValue());
+ assertThat(fragments.length, equalTo(2));
+ assertThat(fragments[0].getText(), equalTo("&lt;b&gt;This is a really cool <em>highlighter</em>.&lt;&#x2F;b&gt;"));
+ assertThat(fragments[1].getText(), equalTo("Postings <em>highlighter</em> gives nice snippets back."));
+ }
+}
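
The expected strings in testHtmlEncodeFormat follow directly from SimpleHTMLEncoder: the raw content is escaped (including the slash, which becomes &#x2F;) before the formatter wraps matches in its unescaped <em> tags. The encoding step in isolation, as a sketch:

    import org.apache.lucene.search.highlight.SimpleHTMLEncoder;

    // Escaping applied to the raw content; highlight tags are appended afterwards, unescaped.
    String encoded = new SimpleHTMLEncoder().encodeText("<b>cool</b>");
    // encoded is now: &lt;b&gt;cool&lt;&#x2F;b&gt;
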
diff --git a/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java b/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java
new file mode 100644
index 0000000..d33ece1
--- /dev/null
+++ b/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java
@@ -0,0 +1,487 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.lucene.search.postingshighlight;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.highlight.DefaultEncoder;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.search.highlight.HighlightUtils;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
+
+@LuceneTestCase.SuppressCodecs({"MockFixedIntBlock", "MockVariableIntBlock", "MockSep", "MockRandom", "Lucene3x"})
+public class CustomPostingsHighlighterTests extends ElasticsearchLuceneTestCase {
+
+ @Test
+ public void testDiscreteHighlightingPerValue() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ final String firstValue = "This is a test. Just a test highlighting from postings highlighter.";
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ final String secondValue = "This is the second value to perform highlighting on.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ final String thirdValue = "This is the third value to test highlighting with postings.";
+ Field body3 = new Field("body", "", offsetsType);
+ doc.add(body3);
+ body3.setStringValue(thirdValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ List<Object> fieldValues = new ArrayList<Object>();
+ fieldValues.add(firstValue);
+ fieldValues.add(secondValue);
+ fieldValues.add(thirdValue);
+
+
+ IndexSearcher searcher = newSearcher(ir);
+
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ BytesRef[] queryTerms = filterTerms(extractTerms(query), "body", true);
+
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+ int docId = topDocs.scoreDocs[0].doc;
+
+ //highlighting per value, considering whole values (simulating number_of_fragments=0)
+ CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder()), fieldValues, false, Integer.MAX_VALUE - 1, 0);
+ highlighter.setBreakIterator(new WholeBreakIterator());
+
+ Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("This is a test. Just a test <b>highlighting</b> from postings highlighter."));
+
+ snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("This is the second value to perform <b>highlighting</b> on."));
+
+ snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("This is the third value to test <b>highlighting</b> with postings."));
+
+
+ //let's also try without the WholeBreakIterator, to show that per-value highlighting still works (though the snippets follow sentence boundaries and the path isn't optimized)
+ highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder()), fieldValues, false, Integer.MAX_VALUE - 1, 0);
+
+ snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("Just a test <b>highlighting</b> from postings highlighter."));
+
+ snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("This is the second value to perform <b>highlighting</b> on."));
+
+ snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("This is the third value to test <b>highlighting</b> with postings."));
+
+ ir.close();
+ dir.close();
+ }
+
+ /*
+ Tests that scoring works properly even when using discrete per-value highlighting
+ */
+ @Test
+ public void testDiscreteHighlightingScoring() throws Exception {
+
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+
+ //good position but only one match
+ final String firstValue = "This is a test. Just a test1 highlighting from postings highlighter.";
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ //two matches, not the best snippet due to its length though
+ final String secondValue = "This is the second highlighting value to perform highlighting on a longer text that gets scored lower.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ //two matches and short, will be scored highest
+ final String thirdValue = "This is highlighting the third short highlighting value.";
+ Field body3 = new Field("body", "", offsetsType);
+ doc.add(body3);
+ body3.setStringValue(thirdValue);
+
+ //one match, same as first but at the end, will be scored lower due to its position
+ final String fourthValue = "Just a test4 highlighting from postings highlighter.";
+ Field body4 = new Field("body", "", offsetsType);
+ doc.add(body4);
+ body4.setStringValue(fourthValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+
+ String firstHlValue = "Just a test1 <b>highlighting</b> from postings highlighter.";
+ String secondHlValue = "This is the second <b>highlighting</b> value to perform <b>highlighting</b> on a longer text that gets scored lower.";
+ String thirdHlValue = "This is <b>highlighting</b> the third short <b>highlighting</b> value.";
+ String fourthHlValue = "Just a test4 <b>highlighting</b> from postings highlighter.";
+
+
+ IndexSearcher searcher = newSearcher(ir);
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ BytesRef[] queryTerms = filterTerms(extractTerms(query), "body", true);
+
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ int docId = topDocs.scoreDocs[0].doc;
+
+ List<Object> fieldValues = new ArrayList<Object>();
+ fieldValues.add(firstValue);
+ fieldValues.add(secondValue);
+ fieldValues.add(thirdValue);
+ fieldValues.add(fourthValue);
+
+ boolean mergeValues = true;
+ CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0);
+ Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+
+ assertThat(snippets.length, equalTo(4));
+
+ assertThat(snippets[0].getText(), equalTo(firstHlValue));
+ assertThat(snippets[1].getText(), equalTo(secondHlValue));
+ assertThat(snippets[2].getText(), equalTo(thirdHlValue));
+ assertThat(snippets[3].getText(), equalTo(fourthHlValue));
+
+
+ //Let's highlight each separate value and check how the snippets are scored
+ mergeValues = false;
+ highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0);
+ List<Snippet> snippets2 = new ArrayList<Snippet>();
+ for (int i = 0; i < fieldValues.size(); i++) { //each call to highlightDoc highlights the next value in the list
+ snippets2.addAll(Arrays.asList(highlighter.highlightDoc("body", queryTerms, searcher, docId, 5)));
+ }
+
+ assertThat(snippets2.size(), equalTo(4));
+ assertThat(snippets2.get(0).getText(), equalTo(firstHlValue));
+ assertThat(snippets2.get(1).getText(), equalTo(secondHlValue));
+ assertThat(snippets2.get(2).getText(), equalTo(thirdHlValue));
+ assertThat(snippets2.get(3).getText(), equalTo(fourthHlValue));
+
+ Comparator<Snippet> comparator = new Comparator<Snippet>() {
+ @Override
+ public int compare(Snippet o1, Snippet o2) {
+ return (int)Math.signum(o1.getScore() - o2.getScore());
+ }
+ };
+
+ //sorting both groups of snippets
+ Arrays.sort(snippets, comparator);
+ Collections.sort(snippets2, comparator);
+
+ //checking that the snippets are in the same order, regardless of whether we used per value discrete highlighting or not
+ //we can't compare the scores directly since they are slightly different due to the multiValued separator added when merging values together
+ //That causes slightly different lengths and start offsets, thus a slightly different score.
+ //Anyway, that's not an issue. What matters is that the score is computed the same way, so the produced order is always the same.
+ for (int i = 0; i < snippets.length; i++) {
+ assertThat(snippets[i].getText(), equalTo(snippets2.get(i).getText()));
+ }
+
+ ir.close();
+ dir.close();
+ }
+
+ /*
+ Tests that we produce the same snippets and scores when manually merging values in our own custom highlighter rather than using the built-in code
+ */
+ @Test
+ public void testMergeValuesScoring() throws Exception {
+
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+
+ //good position but only one match
+ final String firstValue = "This is a test. Just a test1 highlighting from postings highlighter.";
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ //two matches, not the best snippet due to its length though
+ final String secondValue = "This is the second highlighting value to perform highlighting on a longer text that gets scored lower.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ //two matches and short, will be scored highest
+ final String thirdValue = "This is highlighting the third short highlighting value.";
+ Field body3 = new Field("body", "", offsetsType);
+ doc.add(body3);
+ body3.setStringValue(thirdValue);
+
+ //one match, same as first but at the end, will be scored lower due to its position
+ final String fourthValue = "Just a test4 highlighting from postings highlighter.";
+ Field body4 = new Field("body", "", offsetsType);
+ doc.add(body4);
+ body4.setStringValue(fourthValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+
+ String firstHlValue = "Just a test1 <b>highlighting</b> from postings highlighter.";
+ String secondHlValue = "This is the second <b>highlighting</b> value to perform <b>highlighting</b> on a longer text that gets scored lower.";
+ String thirdHlValue = "This is <b>highlighting</b> the third short <b>highlighting</b> value.";
+ String fourthHlValue = "Just a test4 <b>highlighting</b> from postings highlighter.";
+
+
+ IndexSearcher searcher = newSearcher(ir);
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ BytesRef[] queryTerms = filterTerms(extractTerms(query), "body", true);
+
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ int docId = topDocs.scoreDocs[0].doc;
+
+ List<Object> fieldValues = new ArrayList<Object>();
+ fieldValues.add(firstValue);
+ fieldValues.add(secondValue);
+ fieldValues.add(thirdValue);
+ fieldValues.add(fourthValue);
+
+ boolean mergeValues = true;
+ CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0);
+ Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5);
+
+ assertThat(snippets.length, equalTo(4));
+
+ assertThat(snippets[0].getText(), equalTo(firstHlValue));
+ assertThat(snippets[1].getText(), equalTo(secondHlValue));
+ assertThat(snippets[2].getText(), equalTo(thirdHlValue));
+ assertThat(snippets[3].getText(), equalTo(fourthHlValue));
+
+
+ //now testing our fork of the normal postings highlighter, which merges multiple values together using the paragraph separator
+ XPostingsHighlighter highlighter2 = new XPostingsHighlighter(Integer.MAX_VALUE - 1) {
+ @Override
+ protected char getMultiValuedSeparator(String field) {
+ return HighlightUtils.PARAGRAPH_SEPARATOR;
+ }
+
+ @Override
+ protected PassageFormatter getFormatter(String field) {
+ return new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
+ }
+ };
+
+ Map<String, Object[]> highlightMap = highlighter2.highlightFieldsAsObjects(new String[]{"body"}, query, searcher, new int[]{docId}, new int[]{5});
+ Object[] objects = highlightMap.get("body");
+ assertThat(objects, notNullValue());
+ assertThat(objects.length, equalTo(1));
+ Snippet[] normalSnippets = (Snippet[])objects[0];
+
+ assertThat(normalSnippets.length, equalTo(4));
+
+ assertThat(normalSnippets[0].getText(), equalTo(firstHlValue));
+ assertThat(normalSnippets[1].getText(), equalTo(secondHlValue));
+ assertThat(normalSnippets[2].getText(), equalTo(thirdHlValue));
+ assertThat(normalSnippets[3].getText(), equalTo(fourthHlValue));
+
+
+ for (int i = 0; i < normalSnippets.length; i++) {
+ Snippet customSnippet = snippets[i];
+ Snippet normalSnippet = normalSnippets[i];
+ assertThat(customSnippet.getText(), equalTo(normalSnippet.getText()));
+ assertThat(customSnippet.getScore(), equalTo(normalSnippet.getScore()));
+ }
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testRequireFieldMatch() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Field none = new Field("none", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ doc.add(none);
+
+ String firstValue = "This is a test. Just a test highlighting from postings. Feel free to ignore.";
+ body.setStringValue(firstValue);
+ none.setStringValue(firstValue);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ Query query = new TermQuery(new Term("none", "highlighting"));
+ SortedSet<Term> queryTerms = extractTerms(query);
+
+ IndexSearcher searcher = newSearcher(ir);
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+ int docId = topDocs.scoreDocs[0].doc;
+
+ List<Object> values = new ArrayList<Object>();
+ values.add(firstValue);
+
+ CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
+ CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE - 1, 0);
+
+ //no snippets with simulated require field match (we filter the terms ourselves)
+ boolean requireFieldMatch = true;
+ BytesRef[] filteredQueryTerms = filterTerms(queryTerms, "body", requireFieldMatch);
+ Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(0));
+
+
+ highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE - 1, 0);
+ //one snippet without require field match, just passing in the query terms with no filtering on our side
+ requireFieldMatch = false;
+ filteredQueryTerms = filterTerms(queryTerms, "body", requireFieldMatch);
+ snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("Just a test <b>highlighting</b> from postings."));
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testNoMatchSize() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Field none = new Field("none", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ doc.add(none);
+
+ String firstValue = "This is a test. Just a test highlighting from postings. Feel free to ignore.";
+ body.setStringValue(firstValue);
+ none.setStringValue(firstValue);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ Query query = new TermQuery(new Term("none", "highlighting"));
+ SortedSet<Term> queryTerms = extractTerms(query);
+
+ IndexSearcher searcher = newSearcher(ir);
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+ int docId = topDocs.scoreDocs[0].doc;
+
+ List<Object> values = new ArrayList<Object>();
+ values.add(firstValue);
+
+ BytesRef[] filteredQueryTerms = filterTerms(queryTerms, "body", true);
+ CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
+
+ CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE - 1, 0);
+ Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(0));
+
+ highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE - 1, atLeast(1));
+ snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("This is a test."));
+
+ ir.close();
+ dir.close();
+ }
+
+ private static SortedSet<Term> extractTerms(Query query) {
+ SortedSet<Term> queryTerms = new TreeSet<Term>();
+ query.extractTerms(queryTerms);
+ return queryTerms;
+ }
+
+ private static BytesRef[] filterTerms(SortedSet<Term> queryTerms, String field, boolean requireFieldMatch) {
+ SortedSet<Term> fieldTerms;
+ if (requireFieldMatch) {
+ Term floor = new Term(field, "");
+ Term ceiling = new Term(field, UnicodeUtil.BIG_TERM);
+ fieldTerms = queryTerms.subSet(floor, ceiling);
+ } else {
+ fieldTerms = queryTerms;
+ }
+
+ BytesRef[] terms = new BytesRef[fieldTerms.size()];
+ int termUpto = 0;
+ for (Term term : fieldTerms) {
+ terms[termUpto++] = term.bytes();
+ }
+
+ return terms;
+ }
+}
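
The filterTerms helper above relies on Term's natural ordering, which compares the field name first and the term bytes second: the half-open subSet from new Term(field, "") up to new Term(field, UnicodeUtil.BIG_TERM) therefore spans exactly the terms of that one field. A standalone sketch of the trick (the demo class is illustrative):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.util.UnicodeUtil;

    import java.util.SortedSet;
    import java.util.TreeSet;

    public class TermRangeDemo {
        public static void main(String[] args) {
            SortedSet<Term> terms = new TreeSet<Term>();
            terms.add(new Term("body", "highlighting"));
            terms.add(new Term("none", "highlighting"));
            // BIG_TERM sorts after any valid term text, so only "body" terms remain
            SortedSet<Term> bodyOnly = terms.subSet(
                    new Term("body", ""), new Term("body", UnicodeUtil.BIG_TERM));
            System.out.println(bodyOnly); // prints [body:highlighting]
        }
    }
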
diff --git a/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java b/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java
new file mode 100644
index 0000000..6f63b72
--- /dev/null
+++ b/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java
@@ -0,0 +1,1693 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.lucene.search.postingshighlight;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.highlight.DefaultEncoder;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.Test;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.text.BreakIterator;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.*;
+
+@LuceneTestCase.SuppressCodecs({"MockFixedIntBlock", "MockVariableIntBlock", "MockSep", "MockRandom", "Lucene3x"})
+public class XPostingsHighlighterTests extends ElasticsearchLuceneTestCase {
+
+ /*
+ Tests the changes needed to make it possible to perform discrete highlighting.
+ We want to highlight every field value separately in case of multiple values, at least when we need to return the whole field content.
+ This is needed to be able to get back a single snippet per value when number_of_fragments=0.
+ */
+ @Test
+ public void testDiscreteHighlightingPerValue() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ final String firstValue = "This is a test. Just a test highlighting from postings highlighter.";
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ final String secondValue = "This is the second value to perform highlighting on.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ final String thirdValue = "This is the third value to test highlighting with postings.";
+ Field body3 = new Field("body", "", offsetsType);
+ doc.add(body3);
+ body3.setStringValue(thirdValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter() {
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+
+ @Override
+ protected char getMultiValuedSeparator(String field) {
+ //U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting
+ return 8233;
+ }
+ };
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+
+ String firstHlValue = "This is a test. Just a test <b>highlighting</b> from postings highlighter.";
+ String secondHlValue = "This is the second value to perform <b>highlighting</b> on.";
+ String thirdHlValue = "This is the third value to test <b>highlighting</b> with postings.";
+
+        //default behaviour: with the WholeBreakIterator, despite the multi-valued paragraph separator we get back a single snippet spanning all values
+ assertThat(snippets[0], equalTo(firstHlValue + (char)8233 + secondHlValue + (char)8233 + thirdHlValue));
+
+
+
+ highlighter = new XPostingsHighlighter() {
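+            //stateful overrides: each highlight() call consumes the next value and its start offset within the concatenated field (the +1 and +2 account for the single-char separator at each value boundary)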
+ Iterator<String> valuesIterator = Arrays.asList(firstValue, secondValue, thirdValue).iterator();
+ Iterator<Integer> offsetsIterator = Arrays.asList(0, firstValue.length() + 1, firstValue.length() + secondValue.length() + 2).iterator();
+
+ @Override
+ protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
+ return new String[][]{new String[]{valuesIterator.next()}};
+ }
+
+ @Override
+ protected int getOffsetForCurrentValue(String field, int docId) {
+ return offsetsIterator.next();
+ }
+
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+ };
+
+        //first call using the WholeBreakIterator: we now get only the first value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(firstHlValue));
+
+        //second call using the WholeBreakIterator: we now get only the second value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(secondHlValue));
+
+        //third call using the WholeBreakIterator: we now get only the third value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(thirdHlValue));
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testDiscreteHighlightingPerValue_secondValueWithoutMatches() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ final String firstValue = "This is a test. Just a test highlighting from postings highlighter.";
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ final String secondValue = "This is the second value without matches.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ final String thirdValue = "This is the third value to test highlighting with postings.";
+ Field body3 = new Field("body", "", offsetsType);
+ doc.add(body3);
+ body3.setStringValue(thirdValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ XPostingsHighlighter highlighter = new XPostingsHighlighter() {
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+
+ @Override
+ protected char getMultiValuedSeparator(String field) {
+ //U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting
+ return 8233;
+ }
+
+ @Override
+ protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ return new Passage[0];
+ }
+ };
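+        //with getEmptyHighlight returning no passages, a document with no matches at all would get a null snippet back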
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ String firstHlValue = "This is a test. Just a test <b>highlighting</b> from postings highlighter.";
+ String thirdHlValue = "This is the third value to test <b>highlighting</b> with postings.";
+        //default behaviour: with the WholeBreakIterator, despite the multi-valued paragraph separator we get back a single snippet spanning all values,
+        //but only the first and the third value are highlighted, since there are no matches in the second one (its text comes back untouched)
+ assertThat(snippets[0], equalTo(firstHlValue + (char)8233 + secondValue + (char)8233 + thirdHlValue));
+
+
+ highlighter = new XPostingsHighlighter() {
+ Iterator<String> valuesIterator = Arrays.asList(firstValue, secondValue, thirdValue).iterator();
+ Iterator<Integer> offsetsIterator = Arrays.asList(0, firstValue.length() + 1, firstValue.length() + secondValue.length() + 2).iterator();
+
+ @Override
+ protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
+ return new String[][]{new String[]{valuesIterator.next()}};
+ }
+
+ @Override
+ protected int getOffsetForCurrentValue(String field, int docId) {
+ return offsetsIterator.next();
+ }
+
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+
+ @Override
+ protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ return new Passage[0];
+ }
+ };
+
+        //first call using the WholeBreakIterator: we now get only the first value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(firstHlValue));
+
+        //second call using the WholeBreakIterator: we now get nothing back, because there's nothing to highlight in the second value
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], nullValue());
+
+        //third call using the WholeBreakIterator: we now get only the third value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(thirdHlValue));
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testDiscreteHighlightingPerValue_MultipleMatches() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ final String firstValue = "This is a highlighting test. Just a test highlighting from postings highlighter.";
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ final String secondValue = "This is the second highlighting value to test highlighting with postings.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ String firstHlValue = "This is a <b>highlighting</b> test. Just a test <b>highlighting</b> from postings highlighter.";
+ String secondHlValue = "This is the second <b>highlighting</b> value to test <b>highlighting</b> with postings.";
+
+ XPostingsHighlighter highlighter = new XPostingsHighlighter() {
+ Iterator<String> valuesIterator = Arrays.asList(firstValue, secondValue).iterator();
+ Iterator<Integer> offsetsIterator = Arrays.asList(0, firstValue.length() + 1).iterator();
+
+ @Override
+ protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
+ return new String[][]{new String[]{valuesIterator.next()}};
+ }
+
+ @Override
+ protected int getOffsetForCurrentValue(String field, int docId) {
+ return offsetsIterator.next();
+ }
+
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+
+ @Override
+ protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ return new Passage[0];
+ }
+ };
+
+        //first call using the WholeBreakIterator: we now get only the first value, properly highlighted as we wish
+ String[] snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(firstHlValue));
+
+        //second call using the WholeBreakIterator: we now get only the second value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(secondHlValue));
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testDiscreteHighlightingPerValue_MultipleQueryTerms() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ final String firstValue = "This is the first sentence. This is the second sentence.";
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ final String secondValue = "This is the third sentence. This is the fourth sentence.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ final String thirdValue = "This is the fifth sentence";
+ Field body3 = new Field("body", "", offsetsType);
+ doc.add(body3);
+ body3.setStringValue(thirdValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+
+ BooleanQuery query = new BooleanQuery();
+ query.add(new BooleanClause(new TermQuery(new Term("body", "third")), BooleanClause.Occur.SHOULD));
+ query.add(new BooleanClause(new TermQuery(new Term("body", "seventh")), BooleanClause.Occur.SHOULD));
+ query.add(new BooleanClause(new TermQuery(new Term("body", "fifth")), BooleanClause.Occur.SHOULD));
+ query.setMinimumNumberShouldMatch(1);
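+        //"seventh" matches nothing: only the second value ("third") and the third value ("fifth") hold matches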
+
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ String secondHlValue = "This is the <b>third</b> sentence. This is the fourth sentence.";
+ String thirdHlValue = "This is the <b>fifth</b> sentence";
+
+ XPostingsHighlighter highlighter = new XPostingsHighlighter() {
+ Iterator<String> valuesIterator = Arrays.asList(firstValue, secondValue, thirdValue).iterator();
+ Iterator<Integer> offsetsIterator = Arrays.asList(0, firstValue.length() + 1, secondValue.length() + 1).iterator();
+
+ @Override
+ protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
+ return new String[][]{new String[]{valuesIterator.next()}};
+ }
+
+ @Override
+ protected int getOffsetForCurrentValue(String field, int docId) {
+ return offsetsIterator.next();
+ }
+
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+
+ @Override
+ protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ return new Passage[0];
+ }
+ };
+
+        //first call using the WholeBreakIterator: we now get null, as the first value doesn't hold any match
+ String[] snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], nullValue());
+
+        //second call using the WholeBreakIterator: we now get only the second value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(secondHlValue));
+
+        //third call using the WholeBreakIterator: we now get only the third value, properly highlighted as we wish
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0], equalTo(thirdHlValue));
+
+ ir.close();
+ dir.close();
+ }
+
+    /*
+    The following tests were added to make sure that certain behaviours are possible using the postings highlighter.
+    They don't require our forked version, only custom versions of methods that can be overridden and are already exposed to subclasses.
+    */
+
+    /*
+    Tests that it's possible to obtain different fragments per document instead of a single string of concatenated fragments.
+    We use our own PassageFormatter for that and plug it in by overriding the getFormatter method.
+    */
+ @Test
+ public void testCustomPassageFormatterMultipleFragments() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue("This test is another test. Not a good sentence. Test test test test.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ IndexSearcher searcher = newSearcher(ir);
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 5);
+ assertThat(snippets.length, equalTo(1));
+ //default behaviour that we want to change
+ assertThat(snippets[0], equalTo("This <b>test</b> is another test. ... <b>Test</b> <b>test</b> <b>test</b> test."));
+
+
+ final CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
+ highlighter = new XPostingsHighlighter() {
+ @Override
+ protected PassageFormatter getFormatter(String field) {
+ return passageFormatter;
+ }
+ };
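+        //highlightFieldsAsObjects returns the formatter output unconverted, so each document maps to a Snippet[] rather than a pre-joined String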
+
+ final ScoreDoc scoreDocs[] = topDocs.scoreDocs;
+ int docids[] = new int[scoreDocs.length];
+ int maxPassages[] = new int[scoreDocs.length];
+ for (int i = 0; i < docids.length; i++) {
+ docids[i] = scoreDocs[i].doc;
+ maxPassages[i] = 5;
+ }
+ Map<String, Object[]> highlights = highlighter.highlightFieldsAsObjects(new String[]{"body"}, query, searcher, docids, maxPassages);
+ assertThat(highlights, notNullValue());
+ assertThat(highlights.size(), equalTo(1));
+ Object[] objectSnippets = highlights.get("body");
+ assertThat(objectSnippets, notNullValue());
+ assertThat(objectSnippets.length, equalTo(1));
+ assertThat(objectSnippets[0], instanceOf(Snippet[].class));
+
+ Snippet[] snippetsSnippet = (Snippet[]) objectSnippets[0];
+ assertThat(snippetsSnippet.length, equalTo(2));
+ //multiple fragments as we wish
+ assertThat(snippetsSnippet[0].getText(), equalTo("This <b>test</b> is another test."));
+ assertThat(snippetsSnippet[1].getText(), equalTo("<b>Test</b> <b>test</b> <b>test</b> test."));
+
+ ir.close();
+ dir.close();
+ }
+
+ /*
+ Tests that it's possible to return no fragments when there's nothing to highlight
+ We do that by overriding the getEmptyHighlight method
+ */
+ @Test
+ public void testHighlightWithNoMatches() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Field none = new Field("none", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ doc.add(none);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ none.setStringValue(body.stringValue());
+ iw.addDocument(doc);
+ body.setStringValue("Highlighting the first term. Hope it works.");
+ none.setStringValue(body.stringValue());
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("none", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(2));
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 1);
+        //no matches in the body field, yet we still get back one snippet per document
+ assertThat(snippets.length, equalTo(2));
+ //default behaviour: returns the first sentence with num passages = 1
+ assertThat(snippets[0], equalTo("This is a test. "));
+ assertThat(snippets[1], equalTo("Highlighting the first term. "));
+
+ highlighter = new XPostingsHighlighter() {
+ @Override
+ protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ return new Passage[0];
+ }
+ };
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ //Two null snippets if there are no matches, as we wish
+ assertThat(snippets.length, equalTo(2));
+ assertThat(snippets[0], nullValue());
+ assertThat(snippets[1], nullValue());
+
+ ir.close();
+ dir.close();
+ }
+
+    /*
+    Tests that it's possible to avoid fragments that span across different values.
+    We do that by overriding the getMultiValuedSeparator method and using a proper separator between values.
+    */
+ @Test
+ public void testCustomMultiValuedSeparator() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings");
+
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue("highlighter.");
+ iw.addDocument(doc);
+
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+ //default behaviour: getting a fragment that spans across different values
+ assertThat(snippets[0], equalTo("Just a test <b>highlighting</b> from postings highlighter."));
+
+
+ highlighter = new XPostingsHighlighter() {
+ @Override
+ protected char getMultiValuedSeparator(String field) {
+ //U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting
+ return 8233;
+ }
+ };
+ snippets = highlighter.highlight("body", query, searcher, topDocs);
+ assertThat(snippets.length, equalTo(1));
+        //now the fragment doesn't span across different values, since we used the paragraph separator between them
+ assertThat(snippets[0], equalTo("Just a test <b>highlighting</b> from postings" + (char)8233));
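+        //note the trailing U+2029: the passage stops at the value boundary, separator included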
+
+ ir.close();
+ dir.close();
+ }
+
+
+    /*
+    The following are all the existing postings highlighter tests, to make sure we don't have regressions in our own fork.
+    */
+
+ @Test
+ public void testBasics() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ iw.addDocument(doc);
+ body.setStringValue("Highlighting the first term. Hope it works.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(2, snippets.length);
+ assertEquals("Just a test <b>highlighting</b> from postings. ", snippets[0]);
+ assertEquals("<b>Highlighting</b> the first term. ", snippets[1]);
+
+ ir.close();
+ dir.close();
+ }
+
+ public void testFormatWithMatchExceedingContentLength2() throws Exception {
+
+ String bodyText = "123 TEST 01234 TEST";
+
+ String[] snippets = formatWithMatchExceedingContentLength(bodyText);
+
+ assertEquals(1, snippets.length);
+ assertEquals("123 <b>TEST</b> 01234 TE", snippets[0]);
+ }
+
+ public void testFormatWithMatchExceedingContentLength3() throws Exception {
+
+ String bodyText = "123 5678 01234 TEST TEST";
+
+ String[] snippets = formatWithMatchExceedingContentLength(bodyText);
+
+ assertEquals(1, snippets.length);
+ assertEquals("123 5678 01234 TE", snippets[0]);
+ }
+
+ public void testFormatWithMatchExceedingContentLength() throws Exception {
+
+ String bodyText = "123 5678 01234 TEST";
+
+ String[] snippets = formatWithMatchExceedingContentLength(bodyText);
+
+ assertEquals(1, snippets.length);
+        // LUCENE-5166: the only match extends past maxLength, so the snippet comes back without any highlighting
+ assertEquals("123 5678 01234 TE", snippets[0]);
+ }
+
+ private String[] formatWithMatchExceedingContentLength(String bodyText) throws IOException {
+
+ int maxLength = 17;
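+        //the highlighter only considers the first maxLength (17) chars of the content, so a trailing "TEST" is cut down to "TE"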
+
+ final Analyzer analyzer = new MockAnalyzer(random());
+
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ final FieldType fieldType = new FieldType(TextField.TYPE_STORED);
+ fieldType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ final Field body = new Field("body", bodyText, fieldType);
+
+ Document doc = new Document();
+ doc.add(body);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+
+ Query query = new TermQuery(new Term("body", "test"));
+
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(maxLength);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+
+
+ ir.close();
+ dir.close();
+ return snippets;
+ }
+
+    // simple test highlighting the last word.
+ public void testHighlightLastWord() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(1, snippets.length);
+ assertEquals("This is a <b>test</b>", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+    // simple test with one-sentence documents.
+ @Test
+ public void testOneSentence() throws Exception {
+ Directory dir = newDirectory();
+ // use simpleanalyzer for more natural tokenization (else "test." is a token)
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test.");
+ iw.addDocument(doc);
+ body.setStringValue("Test a one sentence document.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(2, snippets.length);
+ assertEquals("This is a <b>test</b>.", snippets[0]);
+ assertEquals("<b>Test</b> a one sentence document.", snippets[1]);
+
+ ir.close();
+ dir.close();
+ }
+
+ // simple test with multiple values that make a result longer than maxLength.
+ @Test
+ public void testMaxLengthWithMultivalue() throws Exception {
+ Directory dir = newDirectory();
+ // use simpleanalyzer for more natural tokenization (else "test." is a token)
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Document doc = new Document();
+
+ for(int i = 0; i < 3 ; i++) {
+ Field body = new Field("body", "", offsetsType);
+ body.setStringValue("This is a multivalued field");
+ doc.add(body);
+ }
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(40);
+ Query query = new TermQuery(new Term("body", "field"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(1, snippets.length);
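+        //three 27-char values plus separators exceed maxLength=40, so the content is cut at exactly 40 chars before the pre and post tags are added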
+ assertTrue("Snippet should have maximum 40 characters plus the pre and post tags",
+ snippets[0].length() == (40 + "<b></b>".length()));
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testMultipleFields() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Field title = new Field("title", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ doc.add(title);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ title.setStringValue("I am hoping for the best.");
+ iw.addDocument(doc);
+ body.setStringValue("Highlighting the first term. Hope it works.");
+ title.setStringValue("But best may not be good enough.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ BooleanQuery query = new BooleanQuery();
+ query.add(new TermQuery(new Term("body", "highlighting")), BooleanClause.Occur.SHOULD);
+ query.add(new TermQuery(new Term("title", "best")), BooleanClause.Occur.SHOULD);
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ Map<String,String[]> snippets = highlighter.highlightFields(new String [] { "body", "title" }, query, searcher, topDocs);
+ assertEquals(2, snippets.size());
+ assertEquals("Just a test <b>highlighting</b> from postings. ", snippets.get("body")[0]);
+ assertEquals("<b>Highlighting</b> the first term. ", snippets.get("body")[1]);
+ assertEquals("I am hoping for the <b>best</b>.", snippets.get("title")[0]);
+ assertEquals("But <b>best</b> may not be good enough.", snippets.get("title")[1]);
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testMultipleTerms() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ iw.addDocument(doc);
+ body.setStringValue("Highlighting the first term. Hope it works.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ BooleanQuery query = new BooleanQuery();
+ query.add(new TermQuery(new Term("body", "highlighting")), BooleanClause.Occur.SHOULD);
+ query.add(new TermQuery(new Term("body", "just")), BooleanClause.Occur.SHOULD);
+ query.add(new TermQuery(new Term("body", "first")), BooleanClause.Occur.SHOULD);
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(2, snippets.length);
+ assertEquals("<b>Just</b> a test <b>highlighting</b> from postings. ", snippets[0]);
+ assertEquals("<b>Highlighting</b> the <b>first</b> term. ", snippets[1]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testMultiplePassages() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ iw.addDocument(doc);
+ body.setStringValue("This test is another test. Not a good sentence. Test test test test.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(2, snippets.length);
+ assertEquals("This is a <b>test</b>. Just a <b>test</b> highlighting from postings. ", snippets[0]);
+ assertEquals("This <b>test</b> is another <b>test</b>. ... <b>Test</b> <b>test</b> <b>test</b> <b>test</b>.", snippets[1]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testUserFailedToIndexOffsets() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType positionsType = new FieldType(TextField.TYPE_STORED);
+ positionsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+ Field body = new Field("body", "", positionsType);
+ Field title = new StringField("title", "", Field.Store.YES);
+ Document doc = new Document();
+ doc.add(body);
+ doc.add(title);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ title.setStringValue("test");
+ iw.addDocument(doc);
+ body.setStringValue("This test is another test. Not a good sentence. Test test test test.");
+ title.setStringValue("test");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ try {
+ highlighter.highlight("body", query, searcher, topDocs, 2);
+ fail("did not hit expected exception");
+ } catch (IllegalArgumentException iae) {
+ // expected
+ }
+
+ try {
+ highlighter.highlight("title", new TermQuery(new Term("title", "test")), searcher, topDocs, 2);
+ fail("did not hit expected exception");
+ } catch (IllegalArgumentException iae) {
+ // expected
+ }
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testBuddhism() throws Exception {
+ String text = "This eight-volume set brings together seminal papers in Buddhist studies from a vast " +
+ "range of academic disciplines published over the last forty years. With a new introduction " +
+ "by the editor, this collection is a unique and unrivalled research resource for both " +
+ "student and scholar. Coverage includes: - Buddhist origins; early history of Buddhism in " +
+ "South and Southeast Asia - early Buddhist Schools and Doctrinal History; Theravada Doctrine " +
+ "- the Origins and nature of Mahayana Buddhism; some Mahayana religious topics - Abhidharma " +
+ "and Madhyamaka - Yogacara, the Epistemological tradition, and Tathagatagarbha - Tantric " +
+ "Buddhism (Including China and Japan); Buddhism in Nepal and Tibet - Buddhism in South and " +
+ "Southeast Asia, and - Buddhism in China, East Asia, and Japan.";
+ Directory dir = newDirectory();
+ Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
+
+ FieldType positionsType = new FieldType(TextField.TYPE_STORED);
+ positionsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", text, positionsType);
+ Document document = new Document();
+ document.add(body);
+ iw.addDocument(document);
+ IndexReader ir = iw.getReader();
+ iw.close();
+ IndexSearcher searcher = newSearcher(ir);
+ PhraseQuery query = new PhraseQuery();
+ query.add(new Term("body", "buddhist"));
+ query.add(new Term("body", "origins"));
+ TopDocs topDocs = searcher.search(query, 10);
+ assertEquals(1, topDocs.totalHits);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertTrue(snippets[0].contains("<b>Buddhist</b> <b>origins</b>"));
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testCuriousGeorge() throws Exception {
+ String text = "It’s the formula for success for preschoolers—Curious George and fire trucks! " +
+ "Curious George and the Firefighters is a story based on H. A. and Margret Rey’s " +
+ "popular primate and painted in the original watercolor and charcoal style. " +
+ "Firefighters are a famously brave lot, but can they withstand a visit from one curious monkey?";
+ Directory dir = newDirectory();
+ Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
+ FieldType positionsType = new FieldType(TextField.TYPE_STORED);
+ positionsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", text, positionsType);
+ Document document = new Document();
+ document.add(body);
+ iw.addDocument(document);
+ IndexReader ir = iw.getReader();
+ iw.close();
+ IndexSearcher searcher = newSearcher(ir);
+ PhraseQuery query = new PhraseQuery();
+ query.add(new Term("body", "curious"));
+ query.add(new Term("body", "george"));
+ TopDocs topDocs = searcher.search(query, 10);
+ assertEquals(1, topDocs.totalHits);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertFalse(snippets[0].contains("<b>Curious</b>Curious"));
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testCambridgeMA() throws Exception {
+ BufferedReader r = new BufferedReader(new InputStreamReader(
+ this.getClass().getResourceAsStream("CambridgeMA.utf8"), "UTF-8"));
+ String text = r.readLine();
+ r.close();
+ Directory dir = newDirectory();
+ Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
+ FieldType positionsType = new FieldType(TextField.TYPE_STORED);
+ positionsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", text, positionsType);
+ Document document = new Document();
+ document.add(body);
+ iw.addDocument(document);
+ IndexReader ir = iw.getReader();
+ iw.close();
+ IndexSearcher searcher = newSearcher(ir);
+ BooleanQuery query = new BooleanQuery();
+ query.add(new TermQuery(new Term("body", "porter")), BooleanClause.Occur.SHOULD);
+ query.add(new TermQuery(new Term("body", "square")), BooleanClause.Occur.SHOULD);
+ query.add(new TermQuery(new Term("body", "massachusetts")), BooleanClause.Occur.SHOULD);
+ TopDocs topDocs = searcher.search(query, 10);
+ assertEquals(1, topDocs.totalHits);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(Integer.MAX_VALUE-1);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertTrue(snippets[0].contains("<b>Square</b>"));
+ assertTrue(snippets[0].contains("<b>Porter</b>"));
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testPassageRanking() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just highlighting from postings. This is also a much sillier test. Feel free to test test test test test test test.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertEquals("This is a <b>test</b>. ... Feel free to <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b>.", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testBooleanMustNot() throws Exception {
+ Directory dir = newDirectory();
+ Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
+ FieldType positionsType = new FieldType(TextField.TYPE_STORED);
+ positionsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "This sentence has both terms. This sentence has only terms.", positionsType);
+ Document document = new Document();
+ document.add(body);
+ iw.addDocument(document);
+ IndexReader ir = iw.getReader();
+ iw.close();
+ IndexSearcher searcher = newSearcher(ir);
+ BooleanQuery query = new BooleanQuery();
+ query.add(new TermQuery(new Term("body", "terms")), BooleanClause.Occur.SHOULD);
+ BooleanQuery query2 = new BooleanQuery();
+ query.add(query2, BooleanClause.Occur.SHOULD);
+ query2.add(new TermQuery(new Term("body", "both")), BooleanClause.Occur.MUST_NOT);
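+        //terms that only appear under a MUST_NOT clause must not get highlighted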
+ TopDocs topDocs = searcher.search(query, 10);
+ assertEquals(1, topDocs.totalHits);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(Integer.MAX_VALUE-1);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertFalse(snippets[0].contains("<b>both</b>"));
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testHighlightAllText() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just highlighting from postings. This is also a much sillier test. Feel free to test test test test test test test.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(10000) {
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+ };
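+        //the WholeBreakIterator treats the entire field as a single passage, so the whole text comes back as one snippet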
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertEquals("This is a <b>test</b>. Just highlighting from postings. This is also a much sillier <b>test</b>. Feel free to <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b>.", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testSpecificDocIDs() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ iw.addDocument(doc);
+ body.setStringValue("Highlighting the first term. Hope it works.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(2, topDocs.totalHits);
+ ScoreDoc[] hits = topDocs.scoreDocs;
+ int[] docIDs = new int[2];
+ docIDs[0] = hits[0].doc;
+ docIDs[1] = hits[1].doc;
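+        //the maxPassages array is per field, not per document: a single entry covers "body" for both docs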
+ String snippets[] = highlighter.highlightFields(new String[] {"body"}, query, searcher, docIDs, new int[] { 1 }).get("body");
+ assertEquals(2, snippets.length);
+ assertEquals("Just a test <b>highlighting</b> from postings. ", snippets[0]);
+ assertEquals("<b>Highlighting</b> the first term. ", snippets[1]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testCustomFieldValueSource() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ Document doc = new Document();
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_NOT_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ final String text = "This is a test. Just highlighting from postings. This is also a much sillier test. Feel free to test test test test test test test.";
+ Field body = new Field("body", text, offsetsType);
+ doc.add(body);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(10000) {
+ @Override
+ protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
+ assertThat(fields.length, equalTo(1));
+ assertThat(docids.length, equalTo(1));
+ String[][] contents = new String[1][1];
+ contents[0][0] = text;
+ return contents;
+ }
+
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+ };
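+        //values come from the overridden loadFieldValues, which is why the body field can use TYPE_NOT_STORED above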
+
+ Query query = new TermQuery(new Term("body", "test"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
+ assertEquals(1, snippets.length);
+ assertEquals("This is a <b>test</b>. Just highlighting from postings. This is also a much sillier <b>test</b>. Feel free to <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b>.", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+    /** Make sure the highlighter returns the first N sentences if
+     * there were no hits. */
+ @Test
+ public void testEmptyHighlights() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Document doc = new Document();
+
+ Field body = new Field("body", "test this is. another sentence this test has. far away is that planet.", offsetsType);
+ doc.add(body);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ int[] docIDs = new int[] {0};
+ String snippets[] = highlighter.highlightFields(new String[] {"body"}, query, searcher, docIDs, new int[] { 2 }).get("body");
+ assertEquals(1, snippets.length);
+ assertEquals("test this is. another sentence this test has. ", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+    /** Make sure we can customize how an empty
+     * highlight is returned. */
+ @Test
+ public void testCustomEmptyHighlights() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Document doc = new Document();
+
+ Field body = new Field("body", "test this is. another sentence this test has. far away is that planet.", offsetsType);
+ doc.add(body);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter() {
+ @Override
+ public Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
+ return new Passage[0];
+ }
+ };
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ int[] docIDs = new int[] {0};
+ String snippets[] = highlighter.highlightFields(new String[] {"body"}, query, searcher, docIDs, new int[] { 2 }).get("body");
+ assertEquals(1, snippets.length);
+ assertNull(snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+    /** Make sure the highlighter returns the whole text when there
+     * are no hits and a WholeBreakIterator is used. */
+ @Test
+ public void testEmptyHighlightsWhole() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Document doc = new Document();
+
+ Field body = new Field("body", "test this is. another sentence this test has. far away is that planet.", offsetsType);
+ doc.add(body);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter(10000) {
+ @Override
+ protected BreakIterator getBreakIterator(String field) {
+ return new WholeBreakIterator();
+ }
+ };
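+        //no matches plus a WholeBreakIterator: the default empty highlight becomes the whole text rather than the first sentences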
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ int[] docIDs = new int[] {0};
+ String snippets[] = highlighter.highlightFields(new String[] {"body"}, query, searcher, docIDs, new int[] { 2 }).get("body");
+ assertEquals(1, snippets.length);
+ assertEquals("test this is. another sentence this test has. far away is that planet.", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+    /** Make sure the highlighter is OK with an entirely missing
+     * field. */
+ @Test
+ public void testFieldIsMissing() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Document doc = new Document();
+
+ Field body = new Field("body", "test this is. another sentence this test has. far away is that planet.", offsetsType);
+ doc.add(body);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("bogus", "highlighting"));
+ int[] docIDs = new int[] {0};
+ String snippets[] = highlighter.highlightFields(new String[] {"bogus"}, query, searcher, docIDs, new int[] { 2 }).get("bogus");
+ assertEquals(1, snippets.length);
+ assertNull(snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testFieldIsJustSpace() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+
+ Document doc = new Document();
+ doc.add(new Field("body", " ", offsetsType));
+ doc.add(new Field("id", "id", offsetsType));
+ iw.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new Field("body", "something", offsetsType));
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ int docID = searcher.search(new TermQuery(new Term("id", "id")), 1).scoreDocs[0].doc;
+
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ int[] docIDs = new int[1];
+ docIDs[0] = docID;
+ String snippets[] = highlighter.highlightFields(new String[] {"body"}, query, searcher, docIDs, new int[] { 2 }).get("body");
+ assertEquals(1, snippets.length);
+ assertEquals(" ", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testFieldIsEmptyString() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+
+ Document doc = new Document();
+ doc.add(new Field("body", "", offsetsType));
+ doc.add(new Field("id", "id", offsetsType));
+ iw.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new Field("body", "something", offsetsType));
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ int docID = searcher.search(new TermQuery(new Term("id", "id")), 1).scoreDocs[0].doc;
+
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ int[] docIDs = new int[1];
+ docIDs[0] = docID;
+ String snippets[] = highlighter.highlightFields(new String[] {"body"}, query, searcher, docIDs, new int[] { 2 }).get("body");
+ assertEquals(1, snippets.length);
+ assertNull(snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testMultipleDocs() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+
+ int numDocs = atLeast(100);
+ for(int i=0;i<numDocs;i++) {
+ Document doc = new Document();
+ String content = "the answer is " + i;
+ if ((i & 1) == 0) {
+ content += " some more terms";
+ }
+ doc.add(new Field("body", content, offsetsType));
+ doc.add(newStringField("id", ""+i, Field.Store.YES));
+ iw.addDocument(doc);
+
+ if (random().nextInt(10) == 2) {
+ iw.commit();
+ }
+ }
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ Query query = new TermQuery(new Term("body", "answer"));
+ TopDocs hits = searcher.search(query, numDocs);
+ assertEquals(numDocs, hits.totalHits);
+
+ String snippets[] = highlighter.highlight("body", query, searcher, hits);
+ assertEquals(numDocs, snippets.length);
+ for(int hit=0;hit<numDocs;hit++) {
+ Document doc = searcher.doc(hits.scoreDocs[hit].doc);
+ int id = Integer.parseInt(doc.get("id"));
+ String expected = "the <b>answer</b> is " + id;
+ if ((id & 1) == 0) {
+ expected += " some more terms";
+ }
+ assertEquals(expected, snippets[hit]);
+ }
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testMultipleSnippetSizes() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Field title = new Field("title", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ doc.add(title);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ title.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter();
+ BooleanQuery query = new BooleanQuery();
+ query.add(new TermQuery(new Term("body", "test")), BooleanClause.Occur.SHOULD);
+ query.add(new TermQuery(new Term("title", "test")), BooleanClause.Occur.SHOULD);
+ Map<String,String[]> snippets = highlighter.highlightFields(new String[] { "title", "body" }, query, searcher, new int[] { 0 }, new int[] { 1, 2 });
+ String titleHighlight = snippets.get("title")[0];
+ String bodyHighlight = snippets.get("body")[0];
+ assertEquals("This is a <b>test</b>. ", titleHighlight);
+ assertEquals("This is a <b>test</b>. Just a <b>test</b> highlighting from postings. ", bodyHighlight);
+ ir.close();
+ dir.close();
+ }
+
+ public void testEncode() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from <i>postings</i>. Feel free to ignore.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
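+        // DefaultPassageFormatter's fourth argument enables HTML escaping, which is why
+        // the expected snippet below is fully entity-encoded (&#32; spaces, &lt;i&gt; tags)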
+ PostingsHighlighter highlighter = new PostingsHighlighter() {
+ @Override
+ protected PassageFormatter getFormatter(String field) {
+ return new DefaultPassageFormatter("<b>", "</b>", "... ", true);
+ }
+ };
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(1, snippets.length);
+ assertEquals("Just&#32;a&#32;test&#32;<b>highlighting</b>&#32;from&#32;&lt;i&gt;postings&lt;&#x2F;i&gt;&#46;&#32;", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ /** customizing the gap separator to force a sentence break */
+ public void testGapSeparator() throws Exception {
+ Directory dir = newDirectory();
+        // use a simple analyzer (MockTokenizer.SIMPLE) for more natural tokenization (otherwise "test." is a single token)
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Document doc = new Document();
+
+ Field body1 = new Field("body", "", offsetsType);
+ body1.setStringValue("This is a multivalued field");
+ doc.add(body1);
+
+ Field body2 = new Field("body", "", offsetsType);
+ body2.setStringValue("This is something different");
+ doc.add(body2);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ PostingsHighlighter highlighter = new PostingsHighlighter() {
+ @Override
+ protected char getMultiValuedSeparator(String field) {
+ assert field.equals("body");
+ return '\u2029';
+ }
+ };
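+        // '\u2029' is the Unicode paragraph separator; the sentence BreakIterator used for
+        // passage splitting breaks on it, so each field value ends its own passage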
+ Query query = new TermQuery(new Term("body", "field"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+ assertEquals(1, snippets.length);
+ assertEquals("This is a multivalued <b>field</b>\u2029", snippets[0]);
+
+ ir.close();
+ dir.close();
+ }
+
+ // LUCENE-4906
+ public void testObjectFormatter() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+
+ body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ IndexSearcher searcher = newSearcher(ir);
+ XPostingsHighlighter highlighter = new XPostingsHighlighter() {
+ @Override
+ protected PassageFormatter getFormatter(String field) {
+ return new PassageFormatter() {
+ PassageFormatter defaultFormatter = new DefaultPassageFormatter();
+
+ @Override
+ public String[] format(Passage passages[], String content) {
+ // Just turns the String snippet into a length 2
+ // array of String
+ return new String[] {"blah blah", defaultFormatter.format(passages, content).toString()};
+ }
+ };
+ }
+ };
+
+ Query query = new TermQuery(new Term("body", "highlighting"));
+ TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+ assertEquals(1, topDocs.totalHits);
+ int[] docIDs = new int[1];
+ docIDs[0] = topDocs.scoreDocs[0].doc;
+ Map<String,Object[]> snippets = highlighter.highlightFieldsAsObjects(new String[]{"body"}, query, searcher, docIDs, new int[] {1});
+ Object[] bodySnippets = snippets.get("body");
+ assertEquals(1, bodySnippets.length);
+ assertTrue(Arrays.equals(new String[] {"blah blah", "Just a test <b>highlighting</b> from postings. "}, (String[]) bodySnippets[0]));
+
+ ir.close();
+ dir.close();
+ }
+}
diff --git a/src/test/java/org/apache/lucene/util/AbstractRandomizedTest.java b/src/test/java/org/apache/lucene/util/AbstractRandomizedTest.java
new file mode 100644
index 0000000..b778b73
--- /dev/null
+++ b/src/test/java/org/apache/lucene/util/AbstractRandomizedTest.java
@@ -0,0 +1,367 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.util;
+
+import com.carrotsearch.randomizedtesting.JUnit4MethodProvider;
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.annotations.Listeners;
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders;
+import com.carrotsearch.randomizedtesting.rules.NoClassHooksShadowingRule;
+import com.carrotsearch.randomizedtesting.rules.NoInstanceHooksOverridesRule;
+import com.carrotsearch.randomizedtesting.rules.StaticFieldsInvariantRule;
+import com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+import org.junit.runner.RunWith;
+
+import java.io.Closeable;
+import java.io.File;
+import java.lang.annotation.*;
+import java.lang.reflect.Method;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.logging.Logger;
+
+@TestMethodProviders({
+ LuceneJUnit3MethodProvider.class,
+ JUnit4MethodProvider.class
+})
+@Listeners({
+ ReproduceInfoPrinter.class
+})
+@RunWith(value = com.carrotsearch.randomizedtesting.RandomizedRunner.class)
+@SuppressCodecs(value = "Lucene3x")
+
+// NOTE: this class is in o.a.lucene.util since it uses test framework classes that
+// are package-private and therefore didn't make sense to copy
+public abstract class AbstractRandomizedTest extends RandomizedTest {
+ /**
+ * Annotation for integration tests
+ */
+ @Inherited
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target(ElementType.TYPE)
+ @TestGroup(enabled = true, sysProperty = SYSPROP_INTEGRATION)
+ public @interface IntegrationTests {
+ }
+
+ // --------------------------------------------------------------------
+ // Test groups, system properties and other annotations modifying tests
+ // --------------------------------------------------------------------
+
+ /**
+ * @see #ignoreAfterMaxFailures
+ */
+ public static final String SYSPROP_MAXFAILURES = "tests.maxfailures";
+
+ /**
+ * @see #ignoreAfterMaxFailures
+ */
+ public static final String SYSPROP_FAILFAST = "tests.failfast";
+
+ public static final String SYSPROP_INTEGRATION = "tests.integration";
+
+ // -----------------------------------------------------------------
+ // Truly immutable fields and constants, initialized once and valid
+ // for all suites ever since.
+ // -----------------------------------------------------------------
+
+ /**
+ * Use this constant when creating Analyzers and any other version-dependent stuff.
+ * <p><b>NOTE:</b> Change this when development starts for new Lucene version:
+ */
+ public static final Version TEST_VERSION_CURRENT = Lucene.VERSION;
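+    // Illustrative usage (mirrors the tests in this tree, not part of the upstream file):
+    //   IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));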
+
+ /**
+ * True if and only if tests are run in verbose mode. If this flag is false
+ * tests are not expected to print any messages.
+ */
+ public static final boolean VERBOSE = systemPropertyAsBoolean("tests.verbose", false);
+
+ /**
+ * A random multiplier which you should use when writing random tests:
+ * multiply it by the number of iterations to scale your tests (for nightly builds).
+ */
+ public static final int RANDOM_MULTIPLIER = systemPropertyAsInt("tests.multiplier", 1);
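+    // Illustrative usage (an assumption, not from the upstream file): scale loop counts, e.g.
+    //   int iters = 20 * RANDOM_MULTIPLIER;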
+
+    /**
+     * Default name of the line docs file, used when the "tests.linedocsfile"
+     * system property is not set.
+     */
+ public static final String DEFAULT_LINE_DOCS_FILE = "europarl.lines.txt.gz";
+
+ /**
+     * The line file used by LineFileDocs.
+ */
+ public static final String TEST_LINE_DOCS_FILE = System.getProperty("tests.linedocsfile", DEFAULT_LINE_DOCS_FILE);
+
+ /**
+     * Create indexes in this directory; ideally use a subdirectory named after the test.
+ */
+ public static final File TEMP_DIR;
+
+ static {
+ String s = System.getProperty("tempDir", System.getProperty("java.io.tmpdir"));
+ if (s == null)
+ throw new RuntimeException("To run tests, you need to define system property 'tempDir' or 'java.io.tmpdir'.");
+ TEMP_DIR = new File(s);
+ TEMP_DIR.mkdirs();
+ }
+
+ /**
+ * These property keys will be ignored in verification of altered properties.
+ *
+ * @see SystemPropertiesInvariantRule
+ * @see #ruleChain
+ * @see #classRules
+ */
+ private static final String[] IGNORED_INVARIANT_PROPERTIES = {
+ "user.timezone", "java.rmi.server.randomIDs", "sun.nio.ch.bugLevel",
+ "solr.directoryFactory", "solr.solr.home", "solr.data.dir" // these might be set by the LuceneTestCase -- ignore
+ };
+
+ // -----------------------------------------------------------------
+ // Fields initialized in class or instance rules.
+ // -----------------------------------------------------------------
+
+
+ // -----------------------------------------------------------------
+ // Class level (suite) rules.
+ // -----------------------------------------------------------------
+
+ /**
+     * Stores the class currently under test.
+ */
+ private static final TestRuleStoreClassName classNameRule;
+
+ /**
+ * Class environment setup rule.
+ */
+ static final TestRuleSetupAndRestoreClassEnv classEnvRule;
+
+ /**
+ * Suite failure marker (any error in the test or suite scope).
+ */
+ public final static TestRuleMarkFailure suiteFailureMarker =
+ new TestRuleMarkFailure();
+
+ /**
+ * Ignore tests after hitting a designated number of initial failures. This
+ * is truly a "static" global singleton since it needs to span the lifetime of all
+ * test classes running inside this JVM (it cannot be part of a class rule).
+ * <p>This poses some problems for the test framework's tests because these sometimes
+ * trigger intentional failures which add up to the global count. This field contains
+ * a (possibly) changing reference to {@link TestRuleIgnoreAfterMaxFailures} and we
+ * dispatch to its current value from the {@link #classRules} chain using {@link TestRuleDelegate}.
+ */
+ private static final AtomicReference<TestRuleIgnoreAfterMaxFailures> ignoreAfterMaxFailuresDelegate;
+ private static final TestRule ignoreAfterMaxFailures;
+
+ static {
+ int maxFailures = systemPropertyAsInt(SYSPROP_MAXFAILURES, Integer.MAX_VALUE);
+ boolean failFast = systemPropertyAsBoolean(SYSPROP_FAILFAST, false);
+
+ if (failFast) {
+ if (maxFailures == Integer.MAX_VALUE) {
+ maxFailures = 1;
+ } else {
+ Logger.getLogger(LuceneTestCase.class.getSimpleName()).warning(
+ "Property '" + SYSPROP_MAXFAILURES + "'=" + maxFailures + ", 'failfast' is" +
+ " ignored.");
+ }
+ }
+
+ ignoreAfterMaxFailuresDelegate =
+ new AtomicReference<TestRuleIgnoreAfterMaxFailures>(
+ new TestRuleIgnoreAfterMaxFailures(maxFailures));
+ ignoreAfterMaxFailures = TestRuleDelegate.of(ignoreAfterMaxFailuresDelegate);
+ }
+
+ /**
+ * Temporarily substitute the global {@link TestRuleIgnoreAfterMaxFailures}. See
+ * {@link #ignoreAfterMaxFailuresDelegate} for some explanation why this method
+ * is needed.
+ */
+ public static TestRuleIgnoreAfterMaxFailures replaceMaxFailureRule(TestRuleIgnoreAfterMaxFailures newValue) {
+ return ignoreAfterMaxFailuresDelegate.getAndSet(newValue);
+ }
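+    // A minimal usage sketch (illustrative only, not part of the upstream file):
+    //   TestRuleIgnoreAfterMaxFailures prev = replaceMaxFailureRule(new TestRuleIgnoreAfterMaxFailures(Integer.MAX_VALUE));
+    //   try { /* run tests that fail intentionally */ } finally { replaceMaxFailureRule(prev); }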
+
+ /**
+ * Max 10mb of static data stored in a test suite class after the suite is complete.
+ * Prevents static data structures leaking and causing OOMs in subsequent tests.
+ */
+ private final static long STATIC_LEAK_THRESHOLD = 10 * 1024 * 1024;
+
+ /**
+ * By-name list of ignored types like loggers etc.
+ */
+ private final static Set<String> STATIC_LEAK_IGNORED_TYPES =
+ Collections.unmodifiableSet(new HashSet<String>(Arrays.asList(
+ EnumSet.class.getName())));
+
+ private final static Set<Class<?>> TOP_LEVEL_CLASSES =
+ Collections.unmodifiableSet(new HashSet<Class<?>>(Arrays.asList(
+ AbstractRandomizedTest.class, LuceneTestCase.class,
+ ElasticsearchIntegrationTest.class, ElasticsearchTestCase.class)));
+
+ /**
+ * This controls how suite-level rules are nested. It is important that _all_ rules declared
+ * in {@link LuceneTestCase} are executed in proper order if they depend on each
+ * other.
+ */
+ @ClassRule
+ public static TestRule classRules = RuleChain
+ .outerRule(new TestRuleIgnoreTestSuites())
+ .around(ignoreAfterMaxFailures)
+ .around(suiteFailureMarker)
+ .around(new TestRuleAssertionsRequired())
+ .around(new StaticFieldsInvariantRule(STATIC_LEAK_THRESHOLD, true) {
+ @Override
+ protected boolean accept(java.lang.reflect.Field field) {
+ // Don't count known classes that consume memory once.
+ if (STATIC_LEAK_IGNORED_TYPES.contains(field.getType().getName())) {
+ return false;
+ }
+ // Don't count references from ourselves, we're top-level.
+ if (TOP_LEVEL_CLASSES.contains(field.getDeclaringClass())) {
+ return false;
+ }
+ return super.accept(field);
+ }
+ })
+ .around(new NoClassHooksShadowingRule())
+ .around(new NoInstanceHooksOverridesRule() {
+ @Override
+ protected boolean verify(Method key) {
+ String name = key.getName();
+ return !(name.equals("setUp") || name.equals("tearDown"));
+ }
+ })
+ .around(new SystemPropertiesInvariantRule(IGNORED_INVARIANT_PROPERTIES))
+ .around(classNameRule = new TestRuleStoreClassName())
+ .around(classEnvRule = new TestRuleSetupAndRestoreClassEnv());
+
+
+ // -----------------------------------------------------------------
+ // Test level rules.
+ // -----------------------------------------------------------------
+
+ /**
+ * Enforces {@link #setUp()} and {@link #tearDown()} calls are chained.
+ */
+ private TestRuleSetupTeardownChained parentChainCallRule = new TestRuleSetupTeardownChained();
+
+ /**
+ * Save test thread and name.
+ */
+ private TestRuleThreadAndTestName threadAndTestNameRule = new TestRuleThreadAndTestName();
+
+ /**
+ * Taint suite result with individual test failures.
+ */
+ private TestRuleMarkFailure testFailureMarker = new TestRuleMarkFailure(suiteFailureMarker);
+
+ /**
+ * This controls how individual test rules are nested. It is important that
+ * _all_ rules declared in {@link LuceneTestCase} are executed in proper order
+ * if they depend on each other.
+ */
+ @Rule
+ public final TestRule ruleChain = RuleChain
+ .outerRule(testFailureMarker)
+ .around(ignoreAfterMaxFailures)
+ .around(threadAndTestNameRule)
+ .around(new SystemPropertiesInvariantRule(IGNORED_INVARIANT_PROPERTIES))
+ .around(new TestRuleSetupAndRestoreInstanceEnv())
+ .around(new TestRuleFieldCacheSanity())
+ .around(parentChainCallRule);
+
+ // -----------------------------------------------------------------
+    // Suite and test case setup/cleanup.
+ // -----------------------------------------------------------------
+
+ /**
+ * For subclasses to override. Overrides must call {@code super.setUp()}.
+ */
+ @Before
+ public void setUp() throws Exception {
+ parentChainCallRule.setupCalled = true;
+ }
+
+ /**
+ * For subclasses to override. Overrides must call {@code super.tearDown()}.
+ */
+ @After
+ public void tearDown() throws Exception {
+ parentChainCallRule.teardownCalled = true;
+ }
+
+
+ // -----------------------------------------------------------------
+ // Test facilities and facades for subclasses.
+ // -----------------------------------------------------------------
+
+ /**
+ * Registers a {@link Closeable} resource that should be closed after the test
+ * completes.
+ *
+ * @return <code>resource</code> (for call chaining).
+ */
+ public <T extends Closeable> T closeAfterTest(T resource) {
+ return RandomizedContext.current().closeAtEnd(resource, LifecycleScope.TEST);
+ }
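+    // Illustrative usage (an assumption, not from the upstream file):
+    //   InputStream in = closeAfterTest(new FileInputStream(file));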
+
+ /**
+ * Registers a {@link Closeable} resource that should be closed after the suite
+ * completes.
+ *
+ * @return <code>resource</code> (for call chaining).
+ */
+ public static <T extends Closeable> T closeAfterSuite(T resource) {
+ return RandomizedContext.current().closeAtEnd(resource, LifecycleScope.SUITE);
+ }
+
+ /**
+ * Return the current class being tested.
+ */
+ public static Class<?> getTestClass() {
+ return classNameRule.getTestClass();
+ }
+
+ /**
+ * Return the name of the currently executing test case.
+ */
+ public String getTestName() {
+ return threadAndTestNameRule.testMethodName;
+ }
+
+}
diff --git a/src/test/java/org/apache/lucene/util/SloppyMathTests.java b/src/test/java/org/apache/lucene/util/SloppyMathTests.java
new file mode 100644
index 0000000..81a2fb5
--- /dev/null
+++ b/src/test/java/org/apache/lucene/util/SloppyMathTests.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.util;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.number.IsCloseTo.closeTo;
+
+public class SloppyMathTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testAccuracy() {
+ for (double lat1 = -89; lat1 <= 89; lat1+=1) {
+ final double lon1 = randomLongitude();
+
+ for (double i = -180; i <= 180; i+=1) {
+ final double lon2 = i;
+ final double lat2 = randomLatitude();
+
+ assertAccurate(lat1, lon1, lat2, lon2);
+ }
+ }
+ }
+
+ @Test
+ public void testSloppyMath() {
+ assertThat(GeoDistance.SLOPPY_ARC.calculate(-46.645, -171.057, -46.644, -171.058, DistanceUnit.METERS), closeTo(134.87709, maxError(134.87709)));
+ assertThat(GeoDistance.SLOPPY_ARC.calculate(-77.912, -81.173, -77.912, -81.171, DistanceUnit.METERS), closeTo(46.57161, maxError(46.57161)));
+ assertThat(GeoDistance.SLOPPY_ARC.calculate(65.75, -20.708, 65.75, -20.709, DistanceUnit.METERS), closeTo(45.66996, maxError(45.66996)));
+ assertThat(GeoDistance.SLOPPY_ARC.calculate(-86.9, 53.738, -86.9, 53.741, DistanceUnit.METERS), closeTo(18.03998, maxError(18.03998)));
+ assertThat(GeoDistance.SLOPPY_ARC.calculate(89.041, 115.93, 89.04, 115.946, DistanceUnit.METERS), closeTo(115.11711, maxError(115.11711)));
+
+ testSloppyMath(DistanceUnit.METERS, 0.01, 5, 45, 90);
+ testSloppyMath(DistanceUnit.KILOMETERS, 0.01, 5, 45, 90);
+ testSloppyMath(DistanceUnit.INCH, 0.01, 5, 45, 90);
+ testSloppyMath(DistanceUnit.MILES, 0.01, 5, 45, 90);
+ }
+
+ private static double maxError(double distance) {
+ return distance / 1000.0;
+ }
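+    // i.e. a relative error of 0.1%: for an accurate distance of 134.87709 m the
+    // sloppy result may deviate by at most ~0.135 m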
+
+    private void testSloppyMath(DistanceUnit unit, double... deltaDeg) {
+ final double lat1 = randomLatitude();
+ final double lon1 = randomLongitude();
+ logger.info("testing SloppyMath with {} at \"{}, {}\"", unit, lat1, lon1);
+
+ for (int test = 0; test < deltaDeg.length; test++) {
+ for (int i = 0; i < 100; i++) {
+                // crop the pole areas, since we know the function is not accurate
+                // around lat(89°, 90°) and lat(-90°, -89°)
+ final double lat2 = Math.max(-89.0, Math.min(+89.0, lat1 + (randomDouble() - 0.5) * 2 * deltaDeg[test]));
+ final double lon2 = lon1 + (randomDouble() - 0.5) * 2 * deltaDeg[test];
+
+ final double accurate = GeoDistance.ARC.calculate(lat1, lon1, lat2, lon2, unit);
+ final double dist = GeoDistance.SLOPPY_ARC.calculate(lat1, lon1, lat2, lon2, unit);
+
+ assertThat("distance between("+lat1+", "+lon1+") and ("+lat2+", "+lon2+"))", dist, closeTo(accurate, maxError(accurate)));
+ }
+ }
+ }
+
+ private static void assertAccurate(double lat1, double lon1, double lat2, double lon2) {
+ double accurate = GeoDistance.ARC.calculate(lat1, lon1, lat2, lon2, DistanceUnit.METERS);
+ double sloppy = GeoDistance.SLOPPY_ARC.calculate(lat1, lon1, lat2, lon2, DistanceUnit.METERS);
+ assertThat("distance between("+lat1+", "+lon1+") and ("+lat2+", "+lon2+"))", sloppy, closeTo(accurate, maxError(accurate)));
+ }
+
+    private static double randomLatitude() {
+        // crop the pole areas, since we know the function is not accurate
+        // around lat(89°, 90°) and lat(-90°, -89°)
+ return (getRandom().nextDouble() - 0.5) * 178.0;
+ }
+
+    private static double randomLongitude() {
+ return (getRandom().nextDouble() - 0.5) * 360.0;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
new file mode 100644
index 0000000..2c605f8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+import org.elasticsearch.index.Index;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.transport.RemoteTransportException;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ElasticsearchExceptionTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testStatus() {
+ ElasticsearchException exception = new ElasticsearchException("test");
+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+
+ exception = new ElasticsearchException("test", new RuntimeException());
+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+
+ exception = new ElasticsearchException("test", new IndexMissingException(new Index("test")));
+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+
+ exception = new RemoteTransportException("test", new IndexMissingException(new Index("test")));
+ assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/VersionTests.java b/src/test/java/org/elasticsearch/VersionTests.java
new file mode 100644
index 0000000..54df22d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/VersionTests.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.Version.V_0_20_0;
+import static org.elasticsearch.Version.V_0_90_0;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.sameInstance;
+
+public class VersionTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testVersions() throws Exception {
+ assertThat(V_0_20_0.before(V_0_90_0), is(true));
+ assertThat(V_0_20_0.before(V_0_20_0), is(false));
+ assertThat(V_0_90_0.before(V_0_20_0), is(false));
+
+ assertThat(V_0_20_0.onOrBefore(V_0_90_0), is(true));
+ assertThat(V_0_20_0.onOrBefore(V_0_20_0), is(true));
+ assertThat(V_0_90_0.onOrBefore(V_0_20_0), is(false));
+
+ assertThat(V_0_20_0.after(V_0_90_0), is(false));
+ assertThat(V_0_20_0.after(V_0_20_0), is(false));
+ assertThat(V_0_90_0.after(V_0_20_0), is(true));
+
+ assertThat(V_0_20_0.onOrAfter(V_0_90_0), is(false));
+ assertThat(V_0_20_0.onOrAfter(V_0_20_0), is(true));
+ assertThat(V_0_90_0.onOrAfter(V_0_20_0), is(true));
+ }
+
+ @Test
+ public void testVersionConstantPresent() {
+ assertThat(Version.CURRENT, sameInstance(Version.fromId(Version.CURRENT.id)));
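+        // LUCENE_CURRENT is the last constant of the Lucene Version enum, so the
+        // concrete version in use should be the one immediately before it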
+ assertThat(Version.CURRENT.luceneVersion.ordinal(), equalTo(org.apache.lucene.util.Version.LUCENE_CURRENT.ordinal() - 1));
+ final int iters = atLeast(20);
+ for (int i = 0; i < iters; i++) {
+ Version version = randomVersion();
+ assertThat(version, sameInstance(Version.fromId(version.id)));
+ assertThat(version.luceneVersion, sameInstance(Version.fromId(version.id).luceneVersion));
+ }
+    }
+
+    @Test
+ public void testCURRENTIsLatest() {
+ final int iters = scaledRandomIntBetween(100, 1000);
+ for (int i = 0; i < iters; i++) {
+ Version version = randomVersion();
+ if (version != Version.CURRENT) {
+ assertThat("Version: " + version + " should be before: " + Version.CURRENT + " but wasn't", version.before(Version.CURRENT), is(true));
+ }
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java b/src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java
new file mode 100644
index 0000000..215553d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.index.query.FilterBuilders.andFilter;
+import static org.elasticsearch.index.query.FilterBuilders.notFilter;
+import static org.elasticsearch.index.query.FilterBuilders.queryFilter;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.notNullValue;
+
+public class HotThreadsTest extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testHotThreadsDontFail() throws ExecutionException, InterruptedException {
+        // this test just checks that nothing crashes or gets stuck
+ createIndex("test");
+ final int iters = atLeast(2);
+ final AtomicBoolean hasErrors = new AtomicBoolean(false);
+ for (int i = 0; i < iters; i++) {
+ final String type;
+ NodesHotThreadsRequestBuilder nodesHotThreadsRequestBuilder = client().admin().cluster().prepareNodesHotThreads();
+ if (randomBoolean()) {
+ TimeValue timeValue = new TimeValue(rarely() ? randomIntBetween(500, 5000) : randomIntBetween(20, 500));
+ nodesHotThreadsRequestBuilder.setInterval(timeValue);
+ }
+ if (randomBoolean()) {
+ nodesHotThreadsRequestBuilder.setThreads(rarely() ? randomIntBetween(500, 5000) : randomIntBetween(1, 500));
+ }
+ if (randomBoolean()) {
+ switch (randomIntBetween(0, 2)) {
+ case 2:
+ type = "cpu";
+ break;
+ case 1:
+ type = "wait";
+ break;
+ default:
+ type = "block";
+ break;
+ }
+ assertThat(type, notNullValue());
+ nodesHotThreadsRequestBuilder.setType(type);
+ } else {
+ type = null;
+ }
+ final CountDownLatch latch = new CountDownLatch(1);
+ nodesHotThreadsRequestBuilder.execute(new ActionListener<NodesHotThreadsResponse>() {
+ @Override
+ public void onResponse(NodesHotThreadsResponse nodeHotThreads) {
+ boolean success = false;
+ try {
+ assertThat(nodeHotThreads, notNullValue());
+ Map<String,NodeHotThreads> nodesMap = nodeHotThreads.getNodesMap();
+ assertThat(nodesMap.size(), equalTo(cluster().size()));
+ for (NodeHotThreads ht : nodeHotThreads) {
+ assertNotNull(ht.getHotThreads());
+ //logger.info(ht.getHotThreads());
+ }
+ success = true;
+ } finally {
+ if (!success) {
+ hasErrors.set(true);
+ }
+ latch.countDown();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ logger.error("FAILED", e);
+ hasErrors.set(true);
+ latch.countDown();
+ fail();
+ }
+ });
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3"));
+ ensureSearchable();
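+            // keep issuing searches until the hot threads response arrives, so the
+            // nodes have real thread activity to report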
+ while(latch.getCount() > 0) {
+ assertHitCount(
+ client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(
+ andFilter(
+ queryFilter(matchAllQuery()),
+ notFilter(andFilter(queryFilter(termQuery("field1", "value1")),
+ queryFilter(termQuery("field1", "value2")))))).get(),
+ 3l);
+ }
+ latch.await();
+ assertThat(hasErrors.get(), is(false));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsTests.java b/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsTests.java
new file mode 100644
index 0000000..87edd65
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsTests.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.stats;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.monitor.sigar.SigarService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numNodes = 0)
+public class ClusterStatsTests extends ElasticsearchIntegrationTest {
+
+ private void assertCounts(ClusterStatsNodes.Counts counts, int total, int masterOnly, int dataOnly, int masterData, int client) {
+ assertThat(counts.getTotal(), Matchers.equalTo(total));
+ assertThat(counts.getMasterOnly(), Matchers.equalTo(masterOnly));
+ assertThat(counts.getDataOnly(), Matchers.equalTo(dataOnly));
+ assertThat(counts.getMasterData(), Matchers.equalTo(masterData));
+ assertThat(counts.getClient(), Matchers.equalTo(client));
+ }
+
+ @Test
+ public void testNodeCounts() {
+ cluster().startNode();
+ ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+ assertCounts(response.getNodesStats().getCounts(), 1, 0, 0, 1, 0);
+
+ cluster().startNode(ImmutableSettings.builder().put("node.data", false));
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertCounts(response.getNodesStats().getCounts(), 2, 1, 0, 1, 0);
+
+ cluster().startNode(ImmutableSettings.builder().put("node.master", false));
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertCounts(response.getNodesStats().getCounts(), 3, 1, 1, 1, 0);
+
+ cluster().startNode(ImmutableSettings.builder().put("node.client", true));
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertCounts(response.getNodesStats().getCounts(), 4, 1, 1, 1, 1);
+ }
+
+
+ private void assertShardStats(ClusterStatsIndices.ShardStats stats, int indices, int total, int primaries, double replicationFactor) {
+ assertThat(stats.getIndices(), Matchers.equalTo(indices));
+ assertThat(stats.getTotal(), Matchers.equalTo(total));
+ assertThat(stats.getPrimaries(), Matchers.equalTo(primaries));
+ assertThat(stats.getReplication(), Matchers.equalTo(replicationFactor));
+ }
+
+ @Test
+ public void testIndicesShardStats() {
+ cluster().startNode();
+
+ ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
+
+
+ prepareCreate("test1").setSettings("number_of_shards", 2, "number_of_replicas", 1).get();
+ ensureYellow();
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW));
+ assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(0l));
+ assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(1));
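+        // one node only: the configured replicas are unassigned, so just the
+        // 2 primaries count and the replication factor is 0.0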
+ assertShardStats(response.getIndicesStats().getShards(), 1, 2, 2, 0.0);
+
+ // add another node, replicas should get assigned
+ cluster().startNode();
+ ensureGreen();
+ index("test1", "type", "1", "f", "f");
+ refresh(); // make the doc visible
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
+ assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1l));
+ assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0);
+
+ prepareCreate("test2").setSettings("number_of_shards", 3, "number_of_replicas", 0).get();
+ ensureGreen();
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
+ assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(2));
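+        // test1: 2 primaries + 2 replicas = 4 shards; test2: 3 primaries + 0 replicas = 3;
+        // hence 2 indices, 7 shards total, 5 primaries, replication = 2 / 5 = 0.4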
+ assertShardStats(response.getIndicesStats().getShards(), 2, 7, 5, 2.0 / 5);
+
+ assertThat(response.getIndicesStats().getShards().getAvgIndexPrimaryShards(), Matchers.equalTo(2.5));
+ assertThat(response.getIndicesStats().getShards().getMinIndexPrimaryShards(), Matchers.equalTo(2));
+ assertThat(response.getIndicesStats().getShards().getMaxIndexPrimaryShards(), Matchers.equalTo(3));
+
+ assertThat(response.getIndicesStats().getShards().getAvgIndexShards(), Matchers.equalTo(3.5));
+ assertThat(response.getIndicesStats().getShards().getMinIndexShards(), Matchers.equalTo(3));
+ assertThat(response.getIndicesStats().getShards().getMaxIndexShards(), Matchers.equalTo(4));
+
+ assertThat(response.getIndicesStats().getShards().getAvgIndexReplication(), Matchers.equalTo(0.5));
+ assertThat(response.getIndicesStats().getShards().getMinIndexReplication(), Matchers.equalTo(0.0));
+ assertThat(response.getIndicesStats().getShards().getMaxIndexReplication(), Matchers.equalTo(1.0));
+
+ }
+
+ @Test
+ public void testValuesSmokeScreen() {
+ cluster().ensureAtMostNumNodes(5);
+ cluster().ensureAtLeastNumNodes(1);
+ SigarService sigarService = cluster().getInstance(SigarService.class);
+ index("test1", "type", "1", "f", "f");
+
+ ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getTimestamp(), Matchers.greaterThan(946681200000l)); // 1 Jan 2000
+ assertThat(response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0l));
+
+ assertThat(response.nodesStats.getFs().getTotal().bytes(), Matchers.greaterThan(0l));
+ assertThat(response.nodesStats.getJvm().getVersions().size(), Matchers.greaterThan(0));
+ if (sigarService.sigarAvailable()) {
+ // We only get those if we have sigar
+ assertThat(response.nodesStats.getOs().getAvailableProcessors(), Matchers.greaterThan(0));
+ assertThat(response.nodesStats.getOs().getAvailableMemory().bytes(), Matchers.greaterThan(0l));
+ assertThat(response.nodesStats.getOs().getCpus().size(), Matchers.greaterThan(0));
+ }
+ assertThat(response.nodesStats.getVersions().size(), Matchers.greaterThan(0));
+ assertThat(response.nodesStats.getVersions().contains(Version.CURRENT), Matchers.equalTo(true));
+ assertThat(response.nodesStats.getPlugins().size(), Matchers.greaterThanOrEqualTo(0));
+
+ assertThat(response.nodesStats.getProcess().count, Matchers.greaterThan(0));
+ // 0 happens when not supported on platform
+ assertThat(response.nodesStats.getProcess().getAvgOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(0L));
+ // these can be -1 if not supported on platform
+ assertThat(response.nodesStats.getProcess().getMinOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L));
+ assertThat(response.nodesStats.getProcess().getMaxOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L));
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java b/src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java
new file mode 100644
index 0000000..67ed2be
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.warmer.put;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.Matchers.hasSize;
+
+public class PutWarmerRequestTests extends ElasticsearchTestCase {
+
+ @Test // issue 4196
+ public void testThatValidationWithoutSpecifyingSearchRequestFails() {
+ PutWarmerRequest putWarmerRequest = new PutWarmerRequest("foo");
+ ActionRequestValidationException validationException = putWarmerRequest.validate();
+ assertThat(validationException.validationErrors(), hasSize(1));
+ assertThat(validationException.getMessage(), containsString("search request is missing"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java b/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java
new file mode 100644
index 0000000..5a3c318
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.elasticsearch.action.bulk;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.*;
+
+@ElasticsearchIntegrationTest.ClusterScope(scope= ElasticsearchIntegrationTest.Scope.SUITE, numNodes=1)
+public class BulkIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBulkIndexCreatesMapping() throws Exception {
+ String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/bulk-log.json");
+ BulkRequestBuilder bulkBuilder = new BulkRequestBuilder(client());
+ bulkBuilder.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
+ bulkBuilder.execute().actionGet();
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ try {
+ GetMappingsResponse mappingsResponse = client().admin().indices().getMappings(new GetMappingsRequest()).get();
+ return mappingsResponse.getMappings().containsKey("logstash-2014.03.30");
+ } catch (Throwable t) {
+ return false;
+ }
+ }
+ });
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ try {
+ GetMappingsResponse mappingsResponse = client().admin().indices().getMappings(new GetMappingsRequest()).get();
+ return mappingsResponse.getMappings().get("logstash-2014.03.30").containsKey("logs");
+ } catch (Throwable t) {
+ return false;
+ }
+ }
+ });
+ ensureYellow();
+ GetMappingsResponse mappingsResponse = client().admin().indices().getMappings(new GetMappingsRequest()).get();
+ assertThat(mappingsResponse.mappings().size(), equalTo(1));
+ assertTrue(mappingsResponse.getMappings().containsKey("logstash-2014.03.30"));
+ assertTrue(mappingsResponse.getMappings().get("logstash-2014.03.30").containsKey("logs"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
new file mode 100644
index 0000000..9171fc4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import com.google.common.base.Charsets;
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class BulkRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleBulk1() throws Exception {
+ String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json");
+ // translate Windows line endings (\r\n) to standard ones (\n)
+ if (Constants.WINDOWS) {
+ bulkAction = Strings.replace(bulkAction, "\r\n", "\n");
+ }
+ BulkRequest bulkRequest = new BulkRequest();
+ bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
+ assertThat(bulkRequest.numberOfActions(), equalTo(3));
+ assertThat(((IndexRequest) bulkRequest.requests().get(0)).source().toBytes(), equalTo(new BytesArray("{ \"field1\" : \"value1\" }").toBytes()));
+ assertThat(bulkRequest.requests().get(1), instanceOf(DeleteRequest.class));
+ assertThat(((IndexRequest) bulkRequest.requests().get(2)).source().toBytes(), equalTo(new BytesArray("{ \"field1\" : \"value3\" }").toBytes()));
+ }
+
+ @Test
+ public void testSimpleBulk2() throws Exception {
+ String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk2.json");
+ BulkRequest bulkRequest = new BulkRequest();
+ bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
+ assertThat(bulkRequest.numberOfActions(), equalTo(3));
+ }
+
+ @Test
+ public void testSimpleBulk3() throws Exception {
+ String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk3.json");
+ BulkRequest bulkRequest = new BulkRequest();
+ bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
+ assertThat(bulkRequest.numberOfActions(), equalTo(3));
+ }
+
+ @Test
+ public void testSimpleBulk4() throws Exception {
+ String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk4.json");
+ BulkRequest bulkRequest = new BulkRequest();
+ bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
+ assertThat(bulkRequest.numberOfActions(), equalTo(4));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(0)).id(), equalTo("1"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(0)).retryOnConflict(), equalTo(2));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(0)).doc().source().toUtf8(), equalTo("{\"field\":\"value\"}"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).id(), equalTo("0"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).type(), equalTo("type1"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).index(), equalTo("index1"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).script(), equalTo("counter += param1"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).scriptLang(), equalTo("js"));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).scriptParams().size(), equalTo(1));
+ assertThat(((Integer) ((UpdateRequest) bulkRequest.requests().get(1)).scriptParams().get("param1")), equalTo(1));
+ assertThat(((UpdateRequest) bulkRequest.requests().get(1)).upsertRequest().source().toUtf8(), equalTo("{\"counter\":1}"));
+ }
+
+ @Test
+ public void testBulkAllowExplicitIndex() throws Exception {
+ String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json");
+        try {
+            new BulkRequest().add(new BytesArray(bulkAction.getBytes(Charsets.UTF_8)), true, null, null, false);
+            fail("expected the request to be rejected because an explicit index is not allowed");
+        } catch (Exception e) {
+            // expected: the bulk items specify an explicit index, which is disallowed here
+        }
+
+ bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk5.json");
+ new BulkRequest().add(new BytesArray(bulkAction.getBytes(Charsets.UTF_8)), true, "test", null, false);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/bulk/bulk-log.json b/src/test/java/org/elasticsearch/action/bulk/bulk-log.json
new file mode 100644
index 0000000..9c3663c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/bulk-log.json
@@ -0,0 +1,24 @@
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
diff --git a/src/test/java/org/elasticsearch/action/bulk/simple-bulk.json b/src/test/java/org/elasticsearch/action/bulk/simple-bulk.json
new file mode 100644
index 0000000..cf76477
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/simple-bulk.json
@@ -0,0 +1,5 @@
+{ "index":{"_index":"test","_type":"type1","_id":"1"} }
+{ "field1" : "value1" }
+{ "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } }
+{ "create" : { "_index" : "test", "_type" : "type1", "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/src/test/java/org/elasticsearch/action/bulk/simple-bulk2.json b/src/test/java/org/elasticsearch/action/bulk/simple-bulk2.json
new file mode 100644
index 0000000..7cd4f99
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/simple-bulk2.json
@@ -0,0 +1,5 @@
+{ "index":{ } }
+{ "field1" : "value1" }
+{ "delete" : { "_id" : "2" } }
+{ "create" : { "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/src/test/java/org/elasticsearch/action/bulk/simple-bulk3.json b/src/test/java/org/elasticsearch/action/bulk/simple-bulk3.json
new file mode 100644
index 0000000..7cd4f99
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/simple-bulk3.json
@@ -0,0 +1,5 @@
+{ "index":{ } }
+{ "field1" : "value1" }
+{ "delete" : { "_id" : "2" } }
+{ "create" : { "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/src/test/java/org/elasticsearch/action/bulk/simple-bulk4.json b/src/test/java/org/elasticsearch/action/bulk/simple-bulk4.json
new file mode 100644
index 0000000..8b916b8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/simple-bulk4.json
@@ -0,0 +1,7 @@
+{ "update" : {"_id" : "1", "_retry_on_conflict" : 2} }
+{ "doc" : {"field" : "value"} }
+{ "update" : { "_id" : "0", "_type" : "type1", "_index" : "index1" } }
+{ "script" : "counter += param1", "lang" : "js", "params" : {"param1" : 1}, "upsert" : {"counter" : 1}}
+{ "delete" : { "_id" : "2" } }
+{ "create" : { "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/src/test/java/org/elasticsearch/action/bulk/simple-bulk5.json b/src/test/java/org/elasticsearch/action/bulk/simple-bulk5.json
new file mode 100644
index 0000000..6ad5ff3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/bulk/simple-bulk5.json
@@ -0,0 +1,5 @@
+{ "index": {"_type": "type1","_id": "1"} }
+{ "field1" : "value1" }
+{ "delete" : { "_type" : "type1", "_id" : "2" } }
+{ "create" : { "_type" : "type1", "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java b/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java
new file mode 100644
index 0000000..3995061
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.percolate;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests that multi percolate requests are parsed correctly from their newline-delimited JSON representation.
+ */
+public class MultiPercolatorRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testParseBulkRequests() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/percolate/mpercolate1.json");
+ MultiPercolateRequest request = new MultiPercolateRequest().add(data, 0, data.length, false);
+
+ assertThat(request.requests().size(), equalTo(6));
+ PercolateRequest percolateRequest = request.requests().get(0);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index1"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strict()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ Map<String, Object> sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value1").map()));
+
+ percolateRequest = request.requests().get(1);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index2"));
+ assertThat(percolateRequest.indices()[1], equalTo("my-index3"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenient()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value2").map()));
+
+ percolateRequest = request.requests().get(2);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index4"));
+ assertThat(percolateRequest.indices()[1], equalTo("my-index5"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true)));
+ assertThat(percolateRequest.onlyCount(), equalTo(true));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value3").map()));
+
+ percolateRequest = request.requests().get(3);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index6"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true)));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), notNullValue());
+ assertThat(percolateRequest.getRequest().id(), equalTo("1"));
+ assertThat(percolateRequest.getRequest().type(), equalTo("my-type1"));
+ assertThat(percolateRequest.getRequest().index(), equalTo("my-index6"));
+ assertThat(percolateRequest.getRequest().routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.getRequest().preference(), equalTo("_local"));
+
+ percolateRequest = request.requests().get(4);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index7"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strict()));
+ assertThat(percolateRequest.onlyCount(), equalTo(true));
+ assertThat(percolateRequest.getRequest(), notNullValue());
+ assertThat(percolateRequest.getRequest().id(), equalTo("2"));
+ assertThat(percolateRequest.getRequest().type(), equalTo("my-type1"));
+ assertThat(percolateRequest.getRequest().index(), equalTo("my-index7"));
+ assertThat(percolateRequest.getRequest().routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.getRequest().preference(), equalTo("_local"));
+
+ percolateRequest = request.requests().get(5);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index8"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("primary"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strict()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value4").map()));
+ }
+
+ @Test
+ public void testParseBulkRequests_defaults() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/percolate/mpercolate2.json");
+ MultiPercolateRequest request = new MultiPercolateRequest();
+ request.indices("my-index1").documentType("my-type1").indicesOptions(IndicesOptions.lenient());
+ request.add(data, 0, data.length, false);
+
+ assertThat(request.requests().size(), equalTo(3));
+ PercolateRequest percolateRequest = request.requests().get(0);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index1"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenient()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ Map<String, Object> sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value1").map()));
+
+ percolateRequest = request.requests().get(1);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index1"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenient()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value2").map()));
+
+ percolateRequest = request.requests().get(2);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index1"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenient()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value3").map()));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json b/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json
new file mode 100644
index 0000000..ceb4aca
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json
@@ -0,0 +1,12 @@
+{"percolate" : {"index" : "my-index1", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "ignore_unavailable" : false}}
+{"doc" : {"field1" : "value1"}}
+{"percolate" : {"indices" : ["my-index2", "my-index3"], "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "ignore_unavailable" : true}}
+{"doc" : {"field1" : "value2"}}
+{"count" : {"indices" : ["my-index4", "my-index5"], "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "expand_wildcards" : "open,closed"}}
+{"doc" : {"field1" : "value3"}}
+{"percolate" : {"id" : "1", "index" : "my-index6", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "expand_wildcards" : ["open", "closed"]}}
+{}
+{"count" : {"id" : "2", "index" : "my-index7", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local"}}
+{}
+{"percolate" : {"index" : "my-index8", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "primary"}}
+{"doc" : {"field1" : "value4"}}
diff --git a/src/test/java/org/elasticsearch/action/percolate/mpercolate2.json b/src/test/java/org/elasticsearch/action/percolate/mpercolate2.json
new file mode 100644
index 0000000..fa676cf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/percolate/mpercolate2.json
@@ -0,0 +1,6 @@
+{"percolate" : {"routing" : "my-routing-1", "preference" : "_local"}}
+{"doc" : {"field1" : "value1"}}
+{"percolate" : {"index" : "my-index1", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "ignore_unavailable" : true}}
+{"doc" : {"field1" : "value2"}}
+{"percolate" : {}}
+{"doc" : {"field1" : "value3"}}
diff --git a/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java
new file mode 100644
index 0000000..103ab7d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests that multi search requests are parsed correctly from their newline-delimited JSON representation.
+ */
+public class MultiSearchRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleAdd() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch1.json");
+ MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null);
+ assertThat(request.requests().size(), equalTo(5));
+ assertThat(request.requests().get(0).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true)));
+ assertThat(request.requests().get(0).types().length, equalTo(0));
+ assertThat(request.requests().get(1).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(1).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true)));
+ assertThat(request.requests().get(1).types()[0], equalTo("type1"));
+ assertThat(request.requests().get(2).indices(), nullValue());
+ assertThat(request.requests().get(2).types().length, equalTo(0));
+ assertThat(request.requests().get(3).indices(), nullValue());
+ assertThat(request.requests().get(3).types().length, equalTo(0));
+ assertThat(request.requests().get(3).searchType(), equalTo(SearchType.COUNT));
+ assertThat(request.requests().get(4).indices(), nullValue());
+ assertThat(request.requests().get(4).types().length, equalTo(0));
+ }
+
+ @Test
+ public void simpleAdd2() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch2.json");
+ MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null);
+ assertThat(request.requests().size(), equalTo(5));
+ assertThat(request.requests().get(0).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(0).types().length, equalTo(0));
+ assertThat(request.requests().get(1).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(1).types()[0], equalTo("type1"));
+ assertThat(request.requests().get(2).indices(), nullValue());
+ assertThat(request.requests().get(2).types().length, equalTo(0));
+ assertThat(request.requests().get(3).indices(), nullValue());
+ assertThat(request.requests().get(3).types().length, equalTo(0));
+ assertThat(request.requests().get(3).searchType(), equalTo(SearchType.COUNT));
+ assertThat(request.requests().get(4).indices(), nullValue());
+ assertThat(request.requests().get(4).types().length, equalTo(0));
+ }
+
+ @Test
+ public void simpleAdd3() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch3.json");
+ MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null);
+ assertThat(request.requests().size(), equalTo(4));
+ assertThat(request.requests().get(0).indices()[0], equalTo("test0"));
+ assertThat(request.requests().get(0).indices()[1], equalTo("test1"));
+ assertThat(request.requests().get(1).indices()[0], equalTo("test2"));
+ assertThat(request.requests().get(1).indices()[1], equalTo("test3"));
+ assertThat(request.requests().get(1).types()[0], equalTo("type1"));
+ assertThat(request.requests().get(2).indices()[0], equalTo("test4"));
+ assertThat(request.requests().get(2).indices()[1], equalTo("test1"));
+ assertThat(request.requests().get(2).types()[0], equalTo("type2"));
+ assertThat(request.requests().get(2).types()[1], equalTo("type1"));
+ assertThat(request.requests().get(3).indices(), nullValue());
+ assertThat(request.requests().get(3).types().length, equalTo(0));
+ assertThat(request.requests().get(3).searchType(), equalTo(SearchType.COUNT));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/search/simple-msearch1.json b/src/test/java/org/elasticsearch/action/search/simple-msearch1.json
new file mode 100644
index 0000000..dd2f582
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/search/simple-msearch1.json
@@ -0,0 +1,10 @@
+{"index":"test", "ignore_unavailable" : true, "expand_wildcards" : "open,closed"}}
+{"query" : {"match_all" {}}}
+{"index" : "test", "type" : "type1", "expand_wildcards" : ["open", "closed"]}
+{"query" : {"match_all" {}}}
+{}
+{"query" : {"match_all" {}}}
+{"search_type" : "count"}
+{"query" : {"match_all" {}}}
+
+{"query" : {"match_all" {}}}
diff --git a/src/test/java/org/elasticsearch/action/search/simple-msearch2.json b/src/test/java/org/elasticsearch/action/search/simple-msearch2.json
new file mode 100644
index 0000000..a9aca8b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/search/simple-msearch2.json
@@ -0,0 +1,10 @@
+{"index":"test"}
+{"query" : {"match_all" {}}}
+{"index" : "test", "type" : "type1"}
+{"query" : {"match_all" {}}}
+{}
+{"query" : {"match_all" {}}}
+{"search_type" : "count"}
+{"query" : {"match_all" {}}}
+
+{"query" : {"match_all" {}}}
diff --git a/src/test/java/org/elasticsearch/action/search/simple-msearch3.json b/src/test/java/org/elasticsearch/action/search/simple-msearch3.json
new file mode 100644
index 0000000..9cdff90
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/search/simple-msearch3.json
@@ -0,0 +1,8 @@
+{"index":["test0", "test1"]}
+{"query" : {"match_all" {}}}
+{"index" : "test2,test3", "type" : "type1"}
+{"query" : {"match_all" {}}}
+{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ]}
+{"query" : {"match_all" {}}}
+{"search_type" : "count"}
+{"query" : {"match_all" {}}}
diff --git a/src/test/java/org/elasticsearch/action/suggest/SuggestActionTests.java b/src/test/java/org/elasticsearch/action/suggest/SuggestActionTests.java
new file mode 100644
index 0000000..cfc81e7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/suggest/SuggestActionTests.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.suggest;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder;
+import org.elasticsearch.search.suggest.SuggestSearchTests;
+
+import java.util.Arrays;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Runs the suggest search tests through the dedicated suggest action instead of the search API.
+ */
+public class SuggestActionTests extends SuggestSearchTests {
+
+ protected Suggest searchSuggest(Client client, String suggestText, int expectShardsFailed, SuggestionBuilder<?>... suggestions) {
+ SuggestRequestBuilder builder = client.prepareSuggest();
+
+ if (suggestText != null) {
+ builder.setSuggestText(suggestText);
+ }
+ for (SuggestionBuilder<?> suggestion : suggestions) {
+ builder.addSuggestion(suggestion);
+ }
+
+ SuggestResponse actionGet = builder.execute().actionGet();
+ assertThat(Arrays.toString(actionGet.getShardFailures()), actionGet.getFailedShards(), equalTo(expectShardsFailed));
+ if (expectShardsFailed > 0) {
+ throw new SearchPhaseExecutionException("suggest", "Suggest execution failed", new ShardSearchFailure[0]);
+ }
+ return actionGet.getSuggest();
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/action/termvector/AbstractTermVectorTests.java b/src/test/java/org/elasticsearch/action/termvector/AbstractTermVectorTests.java
new file mode 100644
index 0000000..6f897df
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/AbstractTermVectorTests.java
@@ -0,0 +1,411 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.LowerCaseFilter;
+import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
+import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.inject.internal.Join;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.*;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public abstract class AbstractTermVectorTests extends ElasticsearchIntegrationTest {
+
+ protected static class TestFieldSetting {
+ final public String name;
+ final public boolean storedOffset;
+ final public boolean storedPayloads;
+ final public boolean storedPositions;
+
+ public TestFieldSetting(String name, boolean storedOffset, boolean storedPayloads, boolean storedPositions) {
+ this.name = name;
+ this.storedOffset = storedOffset;
+ this.storedPayloads = storedPayloads;
+ this.storedPositions = storedPositions;
+ }
+
+ public void addToMappings(XContentBuilder mappingsBuilder) throws IOException {
+ mappingsBuilder.startObject(name);
+ mappingsBuilder.field("type", "string");
+ String tv_settings;
+ if (storedPositions && storedOffset && storedPayloads) {
+ tv_settings = "with_positions_offsets_payloads";
+ } else if (storedPositions && storedOffset) {
+ tv_settings = "with_positions_offsets";
+ } else if (storedPayloads) {
+ tv_settings = "with_positions_payloads";
+ } else if (storedPositions) {
+ tv_settings = "with_positions";
+ } else if (storedOffset) {
+ tv_settings = "with_offsets";
+ } else {
+ tv_settings = "yes";
+ }
+
+ mappingsBuilder.field("term_vector", tv_settings);
+
+ if (storedPayloads) {
+ mappingsBuilder.field("analyzer", "tv_test");
+ }
+
+ mappingsBuilder.endObject();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("name: ").append(name).append(" tv_with:");
+ if (storedPayloads) {
+ sb.append("payloads,");
+ }
+ if (storedOffset) {
+ sb.append("offsets,");
+ }
+ if (storedPositions) {
+ sb.append("positions,");
+ }
+ return sb.toString();
+ }
+ }
+
+ protected static class TestDoc {
+ final public String id;
+ final public TestFieldSetting[] fieldSettings;
+ final public String[] fieldContent;
+ public String index = "test";
+ public String type = "type1";
+
+ public TestDoc(String id, TestFieldSetting[] fieldSettings, String[] fieldContent) {
+ this.id = id;
+ assertEquals(fieldSettings.length, fieldContent.length);
+ this.fieldSettings = fieldSettings;
+ this.fieldContent = fieldContent;
+ }
+
+ public TestDoc index(String index) {
+ this.index = index;
+ return this;
+ }
+
+ @Override
+ public String toString() {
+
+ StringBuilder sb = new StringBuilder("index:").append(index).append(" type:").append(type).append(" id:").append(id);
+ for (int i = 0; i < fieldSettings.length; i++) {
+ TestFieldSetting f = fieldSettings[i];
+ sb.append("\n").append("Field: ").append(f).append("\n content:").append(fieldContent[i]);
+ }
+ sb.append("\n");
+
+ return sb.toString();
+ }
+ }
+
+ protected static class TestConfig {
+ final public TestDoc doc;
+ final public String[] selectedFields;
+ final public boolean requestPositions;
+ final public boolean requestOffsets;
+ final public boolean requestPayloads;
+ public Class<?> expectedException = null;
+
+ public TestConfig(TestDoc doc, String[] selectedFields, boolean requestPositions, boolean requestOffsets, boolean requestPayloads) {
+ this.doc = doc;
+ this.selectedFields = selectedFields;
+ this.requestPositions = requestPositions;
+ this.requestOffsets = requestOffsets;
+ this.requestPayloads = requestPayloads;
+ }
+
+ public TestConfig expectedException(Class<?> exceptionClass) {
+ this.expectedException = exceptionClass;
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ String requested = "";
+ if (requestOffsets) {
+ requested += "offsets,";
+ }
+ if (requestPositions) {
+ requested += "position,";
+ }
+ if (requestPayloads) {
+ requested += "payload,";
+ }
+ Locale aLocale = new Locale("en", "US");
+ return String.format(aLocale, "(doc: %s\n requested: %s, fields: %s)", doc, requested,
+ selectedFields == null ? "NULL" : Join.join(",", selectedFields));
+ }
+ }
+
+ protected void createIndexBasedOnFieldSettings(TestFieldSetting[] fieldSettings, int number_of_shards) throws IOException {
+ cluster().wipeIndices("test");
+ XContentBuilder mappingBuilder = jsonBuilder();
+ mappingBuilder.startObject().startObject("type1").startObject("properties");
+ for (TestFieldSetting field : fieldSettings) {
+ field.addToMappings(mappingBuilder);
+ }
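+ // the "tv_test" analyzer pairs the standard tokenizer with the type_as_payload filter, so fields that store payloads actually receive one per token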
+ ImmutableSettings.Builder settings = ImmutableSettings.settingsBuilder()
+ .put("index.analysis.analyzer.tv_test.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase");
+ if (number_of_shards > 0) {
+ settings.put("number_of_shards", number_of_shards);
+ }
+ mappingBuilder.endObject().endObject().endObject();
+ prepareCreate("test").addMapping("type1", mappingBuilder).setSettings(settings).get();
+
+ ensureYellow();
+ }
+
+ /**
+ * Generate test documents. The returned documents are already indexed.
+ */
+ protected TestDoc[] generateTestDocs(int numberOfDocs, TestFieldSetting[] fieldSettings) {
+ String[] fieldContentOptions = new String[]{"Generating a random permutation of a sequence (such as when shuffling cards).",
+ "Selecting a random sample of a population (important in statistical sampling).",
+ "Allocating experimental units via random assignment to a treatment or control condition.",
+ "Generating random numbers: see Random number generation.",
+ "Transforming a data stream (such as when using a scrambler in telecommunications)."};
+
+ String[] contentArray = new String[fieldSettings.length];
+ Map<String, Object> docSource = new HashMap<String, Object>();
+ TestDoc[] testDocs = new TestDoc[numberOfDocs];
+ for (int docId = 0; docId < numberOfDocs; docId++) {
+ docSource.clear();
+ for (int i = 0; i < contentArray.length; i++) {
+ contentArray[i] = fieldContentOptions[randomInt(fieldContentOptions.length - 1)];
+ docSource.put(fieldSettings[i].name, contentArray[i]);
+ }
+ TestDoc doc = new TestDoc(Integer.toString(docId), fieldSettings, contentArray.clone());
+ index(doc.index, doc.type, doc.id, docSource);
+ testDocs[docId] = doc;
+ }
+
+ refresh();
+ return testDocs;
+
+ }
+
+ protected TestConfig[] generateTestConfigs(int numberOfTests, TestDoc[] testDocs, TestFieldSetting[] fieldSettings) {
+ ArrayList<TestConfig> configs = new ArrayList<TestConfig>();
+ for (int i = 0; i < numberOfTests; i++) {
+
+ ArrayList<String> selectedFields = null;
+ if (randomBoolean()) {
+ // used field selection
+ selectedFields = new ArrayList<String>();
+ if (randomBoolean()) {
+ selectedFields.add("Doesnt_exist"); // this will be ignored.
+ }
+ for (TestFieldSetting field : fieldSettings) {
+ if (randomBoolean()) {
+ selectedFields.add(field.name);
+ }
+ }
+
+ if (selectedFields.isEmpty()) {
+ selectedFields = null; // 0 length set is not supported.
+ }
+
+ }
+ TestConfig config = new TestConfig(testDocs[randomInt(testDocs.length - 1)], selectedFields == null ? null
+ : selectedFields.toArray(new String[]{}), randomBoolean(), randomBoolean(), randomBoolean());
+
+ configs.add(config);
+ }
+ // always adds a test that fails
+ configs.add(new TestConfig(new TestDoc("doesnt_exist", new TestFieldSetting[]{}, new String[]{}).index("doesn't_exist"),
+ new String[]{"doesnt_exist"}, true, true, true).expectedException(IndexMissingException.class));
+
+ refresh();
+
+ return configs.toArray(new TestConfig[]{});
+ }
+
+ protected TestFieldSetting[] getFieldSettings() {
+ return new TestFieldSetting[]{new TestFieldSetting("field_with_positions", false, false, true),
+ new TestFieldSetting("field_with_offsets", true, false, false),
+ new TestFieldSetting("field_with_only_tv", false, false, false),
+ new TestFieldSetting("field_with_positions_offsets", false, false, true),
+ new TestFieldSetting("field_with_positions_payloads", false, true, true)
+
+ };
+ }
+
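+ /** Builds an equivalent in-memory Lucene index with the same per-field term vector options; it serves as the reference for validating Elasticsearch responses. */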
+ protected DirectoryReader indexDocsWithLucene(TestDoc[] testDocs) throws IOException {
+
+ Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
+ for (TestFieldSetting field : testDocs[0].fieldSettings) {
+ if (field.storedPayloads) {
+ mapping.put(field.name, new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer tokenizer = new StandardTokenizer(Version.CURRENT.luceneVersion, reader);
+ TokenFilter filter = new LowerCaseFilter(Version.CURRENT.luceneVersion, tokenizer);
+ filter = new TypeAsPayloadTokenFilter(filter);
+ return new TokenStreamComponents(tokenizer, filter);
+ }
+
+ });
+ }
+ }
+ PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(Version.CURRENT.luceneVersion, CharArraySet.EMPTY_SET), mapping);
+
+ Directory dir = new RAMDirectory();
+ IndexWriterConfig conf = new IndexWriterConfig(Version.CURRENT.luceneVersion, wrapper);
+
+ conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
+ IndexWriter writer = new IndexWriter(dir, conf);
+
+ for (TestDoc doc : testDocs) {
+ Document d = new Document();
+ d.add(new Field("id", doc.id, StringField.TYPE_STORED));
+ for (int i = 0; i < doc.fieldContent.length; i++) {
+ FieldType type = new FieldType(TextField.TYPE_STORED);
+ TestFieldSetting fieldSetting = doc.fieldSettings[i];
+
+ type.setStoreTermVectorOffsets(fieldSetting.storedOffset);
+ type.setStoreTermVectorPayloads(fieldSetting.storedPayloads);
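+ // positions are force-enabled whenever payloads or offsets are stored, matching how the Elasticsearch mapper writes term vectors (Lucene requires positions for payloads)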
+ type.setStoreTermVectorPositions(fieldSetting.storedPositions || fieldSetting.storedPayloads || fieldSetting.storedOffset);
+ type.setStoreTermVectors(true);
+ type.freeze();
+ d.add(new Field(fieldSetting.name, doc.fieldContent[i], type));
+ }
+ writer.updateDocument(new Term("id", doc.id), d);
+ writer.commit();
+ }
+ writer.close();
+
+ return DirectoryReader.open(dir);
+ }
+
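+ /** Walks both term enumerations in parallel and compares frequencies, positions, offsets and payloads between the Elasticsearch response and the Lucene reference index. */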
+ protected void validateResponse(TermVectorResponse esResponse, Fields luceneFields, TestConfig testConfig) throws IOException {
+ TestDoc testDoc = testConfig.doc;
+ HashSet<String> selectedFields = testConfig.selectedFields == null ? null : new HashSet<String>(
+ Arrays.asList(testConfig.selectedFields));
+ Fields esTermVectorFields = esResponse.getFields();
+ for (TestFieldSetting field : testDoc.fieldSettings) {
+ Terms esTerms = esTermVectorFields.terms(field.name);
+ if (selectedFields != null && !selectedFields.contains(field.name)) {
+ assertNull(esTerms);
+ continue;
+ }
+
+ assertNotNull(esTerms);
+
+ Terms luceneTerms = luceneFields.terms(field.name);
+ TermsEnum esTermEnum = esTerms.iterator(null);
+ TermsEnum luceneTermEnum = luceneTerms.iterator(null);
+
+ while (esTermEnum.next() != null) {
+ assertNotNull(luceneTermEnum.next());
+
+ assertThat(esTermEnum.totalTermFreq(), equalTo(luceneTermEnum.totalTermFreq()));
+ DocsAndPositionsEnum esDocsPosEnum = esTermEnum.docsAndPositions(null, null, 0);
+ DocsAndPositionsEnum luceneDocsPosEnum = luceneTermEnum.docsAndPositions(null, null, 0);
+ if (luceneDocsPosEnum == null) {
+ // Lucene has no positions data for this term; verify the field indeed stores neither offsets, payloads nor positions
+ assertFalse(field.storedOffset);
+ assertFalse(field.storedPayloads);
+ assertFalse(field.storedPositions);
+ continue;
+ }
+
+ String currentTerm = esTermEnum.term().utf8ToString();
+
+ assertThat("Token mismatch for field: " + field.name, currentTerm, equalTo(luceneTermEnum.term().utf8ToString()));
+
+ esDocsPosEnum.nextDoc();
+ luceneDocsPosEnum.nextDoc();
+
+ int freq = esDocsPosEnum.freq();
+ assertThat(freq, equalTo(luceneDocsPosEnum.freq()));
+ for (int i = 0; i < freq; i++) {
+ String failDesc = " (field:" + field.name + " term:" + currentTerm + ")";
+ int lucenePos = luceneDocsPosEnum.nextPosition();
+ int esPos = esDocsPosEnum.nextPosition();
+ if (field.storedPositions && testConfig.requestPositions) {
+ assertThat("Position test failed" + failDesc, lucenePos, equalTo(esPos));
+ } else {
+ assertThat("Missing position test failed" + failDesc, esPos, equalTo(-1));
+ }
+ if (field.storedOffset && testConfig.requestOffsets) {
+ assertThat("Offset test failed" + failDesc, luceneDocsPosEnum.startOffset(), equalTo(esDocsPosEnum.startOffset()));
+ assertThat("Offset test failed" + failDesc, luceneDocsPosEnum.endOffset(), equalTo(esDocsPosEnum.endOffset()));
+ } else {
+ assertThat("Missing offset test failed" + failDesc, esDocsPosEnum.startOffset(), equalTo(-1));
+ assertThat("Missing offset test failed" + failDesc, esDocsPosEnum.endOffset(), equalTo(-1));
+ }
+ if (field.storedPayloads && testConfig.requestPayloads) {
+ assertThat("Payload test failed" + failDesc, luceneDocsPosEnum.getPayload(), equalTo(esDocsPosEnum.getPayload()));
+ } else {
+ assertThat("Missing payload test failed" + failDesc, esDocsPosEnum.getPayload(), equalTo(null));
+ }
+
+ }
+ }
+
+ assertNull("Es returned terms are done but lucene isn't", luceneTermEnum.next());
+
+ }
+
+ }
+
+ protected TermVectorRequestBuilder getRequestForConfig(TestConfig config) {
+ return client().prepareTermVector(config.doc.index, config.doc.type, config.doc.id).setPayloads(config.requestPayloads)
+ .setOffsets(config.requestOffsets).setPositions(config.requestPositions).setFieldStatistics(true).setTermStatistics(true)
+ .setSelectedFields(config.selectedFields);
+
+ }
+
+ protected Fields getTermVectorsFromLucene(DirectoryReader directoryReader, TestDoc doc) throws IOException {
+ IndexSearcher searcher = new IndexSearcher(directoryReader);
+ TopDocs search = searcher.search(new TermQuery(new Term("id", doc.id)), 1);
+
+ ScoreDoc[] scoreDocs = search.scoreDocs;
+ assertEquals(1, scoreDocs.length);
+ return directoryReader.getTermVectors(scoreDocs[0].doc);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/action/termvector/GetTermVectorCheckDocFreqTests.java b/src/test/java/org/elasticsearch/action/termvector/GetTermVectorCheckDocFreqTests.java
new file mode 100644
index 0000000..3908e85
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/GetTermVectorCheckDocFreqTests.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.io.BytesStream;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class GetTermVectorCheckDocFreqTests extends ElasticsearchIntegrationTest {
+
+
+
+ @Test
+ public void testSimpleTermVectors() throws ElasticsearchException, IOException {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .field("analyzer", "tv_test")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+ .put("index.number_of_replicas", 0)
+ .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")));
+ ensureGreen();
+ int numDocs = 15;
+ for (int i = 0; i < numDocs; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog")
+ // 0the3 4quick9 10brown15 16fox19 20jumps25 26over30
+ // 31the34 35lazy39 40dog43
+ .endObject()).execute().actionGet();
+ refresh();
+ }
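+ // expected statistics for "the quick brown fox jumps over the lazy dog": terms in alphabetical order, with "the" occurring twice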
+ String[] values = { "brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the" };
+ int[] freq = { 1, 1, 1, 1, 1, 1, 1, 2 };
+ int[][] pos = { { 2 }, { 8 }, { 3 }, { 4 }, { 7 }, { 5 }, { 1 }, { 0, 6 } };
+ int[][] startOffset = { { 10 }, { 40 }, { 16 }, { 20 }, { 35 }, { 26 }, { 4 }, { 0, 31 } };
+ int[][] endOffset = { { 15 }, { 43 }, { 19 }, { 25 }, { 39 }, { 30 }, { 9 }, { 3, 34 } };
+ for (int i = 0; i < numDocs; i++) {
+ checkAllInfo(numDocs, values, freq, pos, startOffset, endOffset, i);
+ checkWithoutTermStatistics(numDocs, values, freq, pos, startOffset, endOffset, i);
+ checkWithoutFieldStatistics(numDocs, values, freq, pos, startOffset, endOffset, i);
+ }
+ }
+
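+ /** With field statistics disabled, sum_ttf, doc_count and sum_doc_freq must come back as -1 while per-term statistics are still present. */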
+ private void checkWithoutFieldStatistics(int numDocs, String[] values, int[] freq, int[][] pos, int[][] startOffset, int[][] endOffset,
+ int i) throws IOException {
+ TermVectorRequestBuilder resp = client().prepareTermVector("test", "type1", Integer.toString(i)).setPayloads(true).setOffsets(true)
+ .setPositions(true).setTermStatistics(true).setFieldStatistics(false).setSelectedFields();
+ TermVectorResponse response = resp.execute().actionGet();
+ assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(1));
+ Terms terms = fields.terms("field");
+ assertThat(terms.size(), equalTo(8L));
+ assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) -1));
+ assertThat(terms.getDocCount(), Matchers.equalTo(-1));
+ assertThat(terms.getSumDocFreq(), equalTo((long) -1));
+ TermsEnum iterator = terms.iterator(null);
+ for (int j = 0; j < values.length; j++) {
+ String string = values[j];
+ BytesRef next = iterator.next();
+ assertThat(next, Matchers.notNullValue());
+ assertThat("expected " + string, string, equalTo(next.utf8ToString()));
+ assertThat(next, Matchers.notNullValue());
+ if (string.equals("the")) {
+ assertThat("expected ttf of " + string, numDocs * 2, equalTo((int) iterator.totalTermFreq()));
+ } else {
+ assertThat("expected ttf of " + string, numDocs, equalTo((int) iterator.totalTermFreq()));
+ }
+
+ DocsAndPositionsEnum docsAndPositions = iterator.docsAndPositions(null, null);
+ assertThat(docsAndPositions.nextDoc(), equalTo(0));
+ assertThat(freq[j], equalTo(docsAndPositions.freq()));
+ assertThat(iterator.docFreq(), equalTo(numDocs));
+ int[] termPos = pos[j];
+ int[] termStartOffset = startOffset[j];
+ int[] termEndOffset = endOffset[j];
+ assertThat(termPos.length, equalTo(freq[j]));
+ assertThat(termStartOffset.length, equalTo(freq[j]));
+ assertThat(termEndOffset.length, equalTo(freq[j]));
+ for (int k = 0; k < freq[j]; k++) {
+ int nextPosition = docsAndPositions.nextPosition();
+ assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
+ assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
+ assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+ assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+
+ XContentBuilder xBuilder = XContentFactory.jsonBuilder();
+
+ response.toXContent(xBuilder, null);
+ BytesStream bytesStream = xBuilder.bytesStream();
+ String utf8 = bytesStream.bytes().toUtf8();
+ String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
+ + i
+ + "\",\"_version\":1,\"found\":true,\"term_vectors\":{\"field\":{\"terms\":{\"brown\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":2,\"start_offset\":10,\"end_offset\":15,\"payload\":\"d29yZA==\"}]},\"dog\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":8,\"start_offset\":40,\"end_offset\":43,\"payload\":\"d29yZA==\"}]},\"fox\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":3,\"start_offset\":16,\"end_offset\":19,\"payload\":\"d29yZA==\"}]},\"jumps\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":4,\"start_offset\":20,\"end_offset\":25,\"payload\":\"d29yZA==\"}]},\"lazy\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":7,\"start_offset\":35,\"end_offset\":39,\"payload\":\"d29yZA==\"}]},\"over\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":5,\"start_offset\":26,\"end_offset\":30,\"payload\":\"d29yZA==\"}]},\"quick\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":1,\"start_offset\":4,\"end_offset\":9,\"payload\":\"d29yZA==\"}]},\"the\":{\"doc_freq\":15,\"ttf\":30,\"term_freq\":2,\"tokens\":[{\"position\":0,\"start_offset\":0,\"end_offset\":3,\"payload\":\"d29yZA==\"},{\"position\":6,\"start_offset\":31,\"end_offset\":34,\"payload\":\"d29yZA==\"}]}}}}}";
+ assertThat(utf8, equalTo(expectedString));
+
+ }
+
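+ /** With term statistics disabled, doc_freq and total term frequency must come back as -1 while field statistics are still present. */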
+ private void checkWithoutTermStatistics(int numDocs, String[] values, int[] freq, int[][] pos, int[][] startOffset, int[][] endOffset,
+ int i) throws IOException {
+ TermVectorRequestBuilder resp = client().prepareTermVector("test", "type1", Integer.toString(i)).setPayloads(true).setOffsets(true)
+ .setPositions(true).setTermStatistics(false).setFieldStatistics(true).setSelectedFields();
+ assertThat(resp.request().termStatistics(), equalTo(false));
+ TermVectorResponse response = resp.execute().actionGet();
+ assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(1));
+ Terms terms = fields.terms("field");
+ assertThat(terms.size(), equalTo(8L));
+ assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
+ assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
+ assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));
+ TermsEnum iterator = terms.iterator(null);
+ for (int j = 0; j < values.length; j++) {
+ String string = values[j];
+ BytesRef next = iterator.next();
+ assertThat(next, Matchers.notNullValue());
+ assertThat("expected " + string, string, equalTo(next.utf8ToString()));
+ assertThat(next, Matchers.notNullValue());
+
+ assertThat("expected ttf of " + string, -1, equalTo((int) iterator.totalTermFreq()));
+
+ DocsAndPositionsEnum docsAndPositions = iterator.docsAndPositions(null, null);
+ assertThat(docsAndPositions.nextDoc(), equalTo(0));
+ assertThat(freq[j], equalTo(docsAndPositions.freq()));
+ assertThat(iterator.docFreq(), equalTo(-1));
+ int[] termPos = pos[j];
+ int[] termStartOffset = startOffset[j];
+ int[] termEndOffset = endOffset[j];
+ assertThat(termPos.length, equalTo(freq[j]));
+ assertThat(termStartOffset.length, equalTo(freq[j]));
+ assertThat(termEndOffset.length, equalTo(freq[j]));
+ for (int k = 0; k < freq[j]; k++) {
+ int nextPosition = docsAndPositions.nextPosition();
+ assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
+ assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
+ assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+ assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+
+ XContentBuilder xBuilder = XContentFactory.jsonBuilder();
+
+ response.toXContent(xBuilder, null);
+ BytesStream bytesStream = xBuilder.bytesStream();
+ String utf8 = bytesStream.bytes().toUtf8();
+ String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
+ + i
+ + "\",\"_version\":1,\"found\":true,\"term_vectors\":{\"field\":{\"field_statistics\":{\"sum_doc_freq\":120,\"doc_count\":15,\"sum_ttf\":135},\"terms\":{\"brown\":{\"term_freq\":1,\"tokens\":[{\"position\":2,\"start_offset\":10,\"end_offset\":15,\"payload\":\"d29yZA==\"}]},\"dog\":{\"term_freq\":1,\"tokens\":[{\"position\":8,\"start_offset\":40,\"end_offset\":43,\"payload\":\"d29yZA==\"}]},\"fox\":{\"term_freq\":1,\"tokens\":[{\"position\":3,\"start_offset\":16,\"end_offset\":19,\"payload\":\"d29yZA==\"}]},\"jumps\":{\"term_freq\":1,\"tokens\":[{\"position\":4,\"start_offset\":20,\"end_offset\":25,\"payload\":\"d29yZA==\"}]},\"lazy\":{\"term_freq\":1,\"tokens\":[{\"position\":7,\"start_offset\":35,\"end_offset\":39,\"payload\":\"d29yZA==\"}]},\"over\":{\"term_freq\":1,\"tokens\":[{\"position\":5,\"start_offset\":26,\"end_offset\":30,\"payload\":\"d29yZA==\"}]},\"quick\":{\"term_freq\":1,\"tokens\":[{\"position\":1,\"start_offset\":4,\"end_offset\":9,\"payload\":\"d29yZA==\"}]},\"the\":{\"term_freq\":2,\"tokens\":[{\"position\":0,\"start_offset\":0,\"end_offset\":3,\"payload\":\"d29yZA==\"},{\"position\":6,\"start_offset\":31,\"end_offset\":34,\"payload\":\"d29yZA==\"}]}}}}}";
+ assertThat(utf8, equalTo(expectedString));
+
+ }
+
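+ /** With both statistics flags enabled, every term statistic, field statistic, position, offset and payload is verified, including the serialized JSON form. */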
+ private void checkAllInfo(int numDocs, String[] values, int[] freq, int[][] pos, int[][] startOffset, int[][] endOffset, int i)
+ throws IOException {
+ TermVectorRequestBuilder resp = client().prepareTermVector("test", "type1", Integer.toString(i)).setPayloads(true).setOffsets(true)
+ .setPositions(true).setFieldStatistics(true).setTermStatistics(true).setSelectedFields();
+ assertThat(resp.request().fieldStatistics(), equalTo(true));
+ TermVectorResponse response = resp.execute().actionGet();
+ assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(1));
+ Terms terms = fields.terms("field");
+ assertThat(terms.size(), equalTo(8L));
+ assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
+ assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
+ assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));
+ TermsEnum iterator = terms.iterator(null);
+ for (int j = 0; j < values.length; j++) {
+ String string = values[j];
+ BytesRef next = iterator.next();
+ assertThat(next, Matchers.notNullValue());
+ assertThat("expected " + string, string, equalTo(next.utf8ToString()));
+ assertThat(next, Matchers.notNullValue());
+ if (string.equals("the")) {
+ assertThat("expected ttf of " + string, numDocs * 2, equalTo((int) iterator.totalTermFreq()));
+ } else {
+ assertThat("expected ttf of " + string, numDocs, equalTo((int) iterator.totalTermFreq()));
+ }
+
+ DocsAndPositionsEnum docsAndPositions = iterator.docsAndPositions(null, null);
+ assertThat(docsAndPositions.nextDoc(), equalTo(0));
+ assertThat(freq[j], equalTo(docsAndPositions.freq()));
+ assertThat(iterator.docFreq(), equalTo(numDocs));
+ int[] termPos = pos[j];
+ int[] termStartOffset = startOffset[j];
+ int[] termEndOffset = endOffset[j];
+ assertThat(termPos.length, equalTo(freq[j]));
+ assertThat(termStartOffset.length, equalTo(freq[j]));
+ assertThat(termEndOffset.length, equalTo(freq[j]));
+ for (int k = 0; k < freq[j]; k++) {
+ int nextPosition = docsAndPositions.nextPosition();
+ assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
+ assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
+ assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+ assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+
+ XContentBuilder xBuilder = XContentFactory.jsonBuilder();
+
+ response.toXContent(xBuilder, null);
+ BytesStream bytesStream = xBuilder.bytesStream();
+ String utf8 = bytesStream.bytes().toUtf8();
+ String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
+ + i
+ + "\",\"_version\":1,\"found\":true,\"term_vectors\":{\"field\":{\"field_statistics\":{\"sum_doc_freq\":120,\"doc_count\":15,\"sum_ttf\":135},\"terms\":{\"brown\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":2,\"start_offset\":10,\"end_offset\":15,\"payload\":\"d29yZA==\"}]},\"dog\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":8,\"start_offset\":40,\"end_offset\":43,\"payload\":\"d29yZA==\"}]},\"fox\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":3,\"start_offset\":16,\"end_offset\":19,\"payload\":\"d29yZA==\"}]},\"jumps\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":4,\"start_offset\":20,\"end_offset\":25,\"payload\":\"d29yZA==\"}]},\"lazy\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":7,\"start_offset\":35,\"end_offset\":39,\"payload\":\"d29yZA==\"}]},\"over\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":5,\"start_offset\":26,\"end_offset\":30,\"payload\":\"d29yZA==\"}]},\"quick\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":1,\"start_offset\":4,\"end_offset\":9,\"payload\":\"d29yZA==\"}]},\"the\":{\"doc_freq\":15,\"ttf\":30,\"term_freq\":2,\"tokens\":[{\"position\":0,\"start_offset\":0,\"end_offset\":3,\"payload\":\"d29yZA==\"},{\"position\":6,\"start_offset\":31,\"end_offset\":34,\"payload\":\"d29yZA==\"}]}}}}}";
+ assertThat(utf8, equalTo(expectedString));
+ }
+
+}
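
The "payload":"d29yZA==" entries in the expected JSON above are simply the
Base64 encoding of the UTF-8 bytes of "word": the whitespace tokenizer assigns
every token the type "word", the type_as_payload filter stores that type as the
token's payload, and the term vector response renders payload bytes as Base64.
A minimal standalone sketch reproducing the encoded value (java.util.Base64 is
assumed here; the test itself never needs it):

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public class PayloadEncodingSketch {
        public static void main(String[] args) {
            // the payload that type_as_payload attaches to each token
            byte[] payload = "word".getBytes(StandardCharsets.UTF_8);
            // term vector responses render payloads as Base64: prints d29yZA==
            System.out.println(Base64.getEncoder().encodeToString(payload));
        }
    }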
diff --git a/src/test/java/org/elasticsearch/action/termvector/GetTermVectorTests.java b/src/test/java/org/elasticsearch/action/termvector/GetTermVectorTests.java
new file mode 100644
index 0000000..f3dbb41
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/GetTermVectorTests.java
@@ -0,0 +1,544 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import org.apache.lucene.analysis.payloads.PayloadHelper;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.equalTo;
+
+public class GetTermVectorTests extends AbstractTermVectorTests {
+
+ @Test
+ public void testNoSuchDoc() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping));
+
+ ensureYellow();
+
+ client().prepareIndex("test", "type1", "666").setSource("field", "foo bar").execute().actionGet();
+ refresh();
+ for (int i = 0; i < 20; i++) {
+ ActionFuture<TermVectorResponse> termVector = client().termVector(new TermVectorRequest("test", "type1", "" + i));
+ TermVectorResponse actionGet = termVector.actionGet();
+ assertThat(actionGet, Matchers.notNullValue());
+ assertThat(actionGet.isExists(), Matchers.equalTo(false));
+
+ }
+
+ }
+
+ @Test
+ public void testExistingFieldWithNoTermVectorsNoNPE() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("existingfield")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping));
+
+ ensureYellow();
+ // a field whose only content is a question mark produces no tokens,
+ // so its term vectors will be null
+ client().prepareIndex("test", "type1", "0").setSource("existingfield", "?").execute().actionGet();
+ refresh();
+ String[] selectedFields = { "existingfield" };
+ ActionFuture<TermVectorResponse> termVector = client().termVector(
+ new TermVectorRequest("test", "type1", "0").selectedFields(selectedFields));
+ // let's see if the null term vectors are caught...
+ TermVectorResponse actionGet = termVector.actionGet();
+ assertThat(actionGet.isExists(), Matchers.equalTo(true));
+
+ }
+
+ @Test
+ public void testExistingFieldButNotInDocNPE() throws Exception {
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("existingfield")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ ensureYellow();
+ // the document does not contain the requested field at all, so the
+ // term vectors for it will be null
+ client().prepareIndex("test", "type1", "0").setSource("anotherexistingfield", 1).execute().actionGet();
+ refresh();
+ String[] selectedFields = { "existingfield" };
+ ActionFuture<TermVectorResponse> termVector = client().termVector(
+ new TermVectorRequest("test", "type1", "0").selectedFields(selectedFields));
+ // let's see if the null term vectors are caught...
+ TermVectorResponse actionGet = termVector.actionGet();
+ assertThat(actionGet.isExists(), Matchers.equalTo(true));
+
+ }
+
+ @Test
+ public void testSimpleTermVectors() throws ElasticsearchException, IOException {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .field("analyzer", "tv_test")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping)
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")));
+ ensureYellow();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog")
+ // offsets: the[0,3] quick[4,9] brown[10,15] fox[16,19] jumps[20,25]
+ // over[26,30] the[31,34] lazy[35,39] dog[40,43]
+ .endObject()).execute().actionGet();
+ refresh();
+ }
+ String[] values = {"brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the"};
+ int[] freq = {1, 1, 1, 1, 1, 1, 1, 2};
+ int[][] pos = {{2}, {8}, {3}, {4}, {7}, {5}, {1}, {0, 6}};
+ int[][] startOffset = {{10}, {40}, {16}, {20}, {35}, {26}, {4}, {0, 31}};
+ int[][] endOffset = {{15}, {43}, {19}, {25}, {39}, {30}, {9}, {3, 34}};
+ for (int i = 0; i < 10; i++) {
+ TermVectorRequestBuilder resp = client().prepareTermVector("test", "type1", Integer.toString(i)).setPayloads(true)
+ .setOffsets(true).setPositions(true).setSelectedFields();
+ TermVectorResponse response = resp.execute().actionGet();
+ assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(1));
+ Terms terms = fields.terms("field");
+ assertThat(terms.size(), equalTo(8L));
+ TermsEnum iterator = terms.iterator(null);
+ for (int j = 0; j < values.length; j++) {
+ String string = values[j];
+ BytesRef next = iterator.next();
+ assertThat(next, Matchers.notNullValue());
+ assertThat("expected " + string, string, equalTo(next.utf8ToString()));
+ // do not test ttf or doc frequency, because here we have many
+ // shards and do not know how documents are distributed
+ DocsAndPositionsEnum docsAndPositions = iterator.docsAndPositions(null, null);
+ assertThat(docsAndPositions.nextDoc(), equalTo(0));
+ assertThat(freq[j], equalTo(docsAndPositions.freq()));
+ int[] termPos = pos[j];
+ int[] termStartOffset = startOffset[j];
+ int[] termEndOffset = endOffset[j];
+ assertThat(termPos.length, equalTo(freq[j]));
+ assertThat(termStartOffset.length, equalTo(freq[j]));
+ assertThat(termEndOffset.length, equalTo(freq[j]));
+ for (int k = 0; k < freq[j]; k++) {
+ int nextPosition = docsAndPositions.nextPosition();
+ assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
+ assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
+ assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+ assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+ }
+ }
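+
+ // Cross-check of the hard-coded offsets above via plain string arithmetic:
+ // in "the quick brown fox jumps over the lazy dog" the second "the" starts
+ // at offset 31 and the text is 43 characters long, matching startOffset[7]
+ // = {0, 31} and the end offset 43 of "dog". For example:
+ //
+ // String text = "the quick brown fox jumps over the lazy dog";
+ // assert text.indexOf("the", 1) == 31;
+ // assert text.indexOf("dog") == 40 && text.length() == 43;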
+
+ @Test
+ public void testRandomSingleTermVectors() throws ElasticsearchException, IOException {
+ FieldType ft = new FieldType();
+ int config = randomInt(6);
+ boolean storePositions = false;
+ boolean storeOffsets = false;
+ boolean storePayloads = false;
+ boolean storeTermVectors = false;
+ // each config value selects exactly one combination of stored options
+ switch (config) {
+ case 0: {
+ // do not store term vectors at all
+ break;
+ }
+ case 1: {
+ storeTermVectors = true;
+ break;
+ }
+ case 2: {
+ storeTermVectors = true;
+ storePositions = true;
+ break;
+ }
+ case 3: {
+ storeTermVectors = true;
+ storeOffsets = true;
+ break;
+ }
+ case 4: {
+ storeTermVectors = true;
+ storePositions = true;
+ storeOffsets = true;
+ break;
+ }
+ case 5: {
+ storeTermVectors = true;
+ storePositions = true;
+ storePayloads = true;
+ break;
+ }
+ case 6: {
+ storeTermVectors = true;
+ storePositions = true;
+ storeOffsets = true;
+ storePayloads = true;
+ break;
+ }
+ }
+ ft.setStoreTermVectors(storeTermVectors);
+ ft.setStoreTermVectorOffsets(storeOffsets);
+ ft.setStoreTermVectorPayloads(storePayloads);
+ ft.setStoreTermVectorPositions(storePositions);
+
+ String optionString = AbstractFieldMapper.termVectorOptionsToString(ft);
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "string")
+ .field("term_vector", optionString)
+ .field("analyzer", "tv_test")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping)
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")));
+ ensureYellow();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog")
+ // offsets: the[0,3] quick[4,9] brown[10,15] fox[16,19] jumps[20,25]
+ // over[26,30] the[31,34] lazy[35,39] dog[40,43]
+ .endObject()).execute().actionGet();
+ refresh();
+ }
+ String[] values = {"brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the"};
+ int[] freq = {1, 1, 1, 1, 1, 1, 1, 2};
+ int[][] pos = {{2}, {8}, {3}, {4}, {7}, {5}, {1}, {0, 6}};
+ int[][] startOffset = {{10}, {40}, {16}, {20}, {35}, {26}, {4}, {0, 31}};
+ int[][] endOffset = {{15}, {43}, {19}, {25}, {39}, {30}, {9}, {3, 34}};
+
+ boolean isPayloadRequested = randomBoolean();
+ boolean isOffsetRequested = randomBoolean();
+ boolean isPositionsRequested = randomBoolean();
+ String infoString = createInfoString(isPositionsRequested, isOffsetRequested, isPayloadRequested, optionString);
+ for (int i = 0; i < 10; i++) {
+ TermVectorRequestBuilder resp = client().prepareTermVector("test", "type1", Integer.toString(i))
+ .setPayloads(isPayloadRequested).setOffsets(isOffsetRequested).setPositions(isPositionsRequested).setSelectedFields();
+ TermVectorResponse response = resp.execute().actionGet();
+ assertThat(infoString + "doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(ft.storeTermVectors() ? 1 : 0));
+ if (ft.storeTermVectors()) {
+ Terms terms = fields.terms("field");
+ assertThat(terms.size(), equalTo(8L));
+ TermsEnum iterator = terms.iterator(null);
+ for (int j = 0; j < values.length; j++) {
+ String string = values[j];
+ BytesRef next = iterator.next();
+ assertThat(infoString, next, Matchers.notNullValue());
+ assertThat(infoString + "expected " + string, string, equalTo(next.utf8ToString()));
+ // do not test ttf or doc frequency, because here we have
+ // many shards and do not know how documents are distributed
+ DocsAndPositionsEnum docsAndPositions = iterator.docsAndPositions(null, null);
+ // docsAndPositions only returns something if positions, payloads
+ // or offsets are stored / requested; otherwise a plain DocsEnum
+ // would have to be used
+ assertThat(infoString, docsAndPositions.nextDoc(), equalTo(0));
+ assertThat(infoString, freq[j], equalTo(docsAndPositions.freq()));
+ int[] termPos = pos[j];
+ int[] termStartOffset = startOffset[j];
+ int[] termEndOffset = endOffset[j];
+ if (isPositionsRequested && storePositions) {
+ assertThat(infoString, termPos.length, equalTo(freq[j]));
+ }
+ if (isOffsetRequested && storeOffsets) {
+ assertThat(termStartOffset.length, equalTo(freq[j]));
+ assertThat(termEndOffset.length, equalTo(freq[j]));
+ }
+ for (int k = 0; k < freq[j]; k++) {
+ int nextPosition = docsAndPositions.nextPosition();
+ // only return something useful if requested and stored
+ if (isPositionsRequested && storePositions) {
+ assertThat(infoString + "positions for term: " + string, nextPosition, equalTo(termPos[k]));
+ } else {
+ assertThat(infoString + "positions for term: ", nextPosition, equalTo(-1));
+ }
+
+ // only return something useful if requested and stored
+ if (isPayloadRequested && storePayloads) {
+ assertThat(infoString + "payloads for term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef(
+ "word")));
+ } else {
+ assertThat(infoString + "payloads for term: " + string, docsAndPositions.getPayload(), equalTo(null));
+ }
+ // only return something useful if requested and stored
+ if (isOffsetRequested && storeOffsets) {
+ assertThat(infoString + "startOffsets term: " + string, docsAndPositions.startOffset(),
+ equalTo(termStartOffset[k]));
+ assertThat(infoString + "endOffsets term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+ } else {
+ assertThat(infoString + "startOffsets term: " + string, docsAndPositions.startOffset(), equalTo(-1));
+ assertThat(infoString + "endOffsets term: " + string, docsAndPositions.endOffset(), equalTo(-1));
+ }
+
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+ }
+
+ }
+ }
+
+ private String createInfoString(boolean isPositionsRequested, boolean isOffsetRequested, boolean isPayloadRequested,
+ String optionString) {
+ String ret = "Store config: " + optionString + "\n" + "Requested: pos-"
+ + (isPositionsRequested ? "yes" : "no") + ", offsets-" + (isOffsetRequested ? "yes" : "no") + ", payload- "
+ + (isPayloadRequested ? "yes" : "no") + "\n";
+ return ret;
+ }
+
+ @Test
+ public void testDuelESLucene() throws Exception {
+ TestFieldSetting[] testFieldSettings = getFieldSettings();
+ createIndexBasedOnFieldSettings(testFieldSettings, -1);
+ TestDoc[] testDocs = generateTestDocs(5, testFieldSettings);
+
+ DirectoryReader directoryReader = indexDocsWithLucene(testDocs);
+ TestConfig[] testConfigs = generateTestConfigs(20, testDocs, testFieldSettings);
+
+ for (TestConfig test : testConfigs) {
+ try {
+ TermVectorRequestBuilder request = getRequestForConfig(test);
+ if (test.expectedException != null) {
+ assertThrows(request, test.expectedException);
+ continue;
+ }
+
+ TermVectorResponse response = request.get();
+ Fields luceneTermVectors = getTermVectorsFromLucene(directoryReader, test.doc);
+ validateResponse(response, luceneTermVectors, test);
+ } catch (Throwable t) {
+ throw new Exception("Test exception while running " + test.toString(), t);
+ }
+ }
+ }
+
+ @Test
+ public void testRandomPayloadWithDelimitedPayloadTokenFilter() throws ElasticsearchException, IOException {
+
+ //create the test document
+ int encoding = randomIntBetween(0, 2);
+ String encodingString = "";
+ if (encoding == 0) {
+ encodingString = "float";
+ }
+ if (encoding == 1) {
+ encodingString = "int";
+ }
+ if (encoding == 2) {
+ encodingString = "identity";
+ }
+ String[] tokens = createRandomTokens();
+ Map<String, List<BytesRef>> payloads = createPayloads(tokens, encoding);
+ String delimiter = createRandomDelimiter(tokens);
+ String queryString = createString(tokens, payloads, encoding, delimiter.charAt(0));
+ //create the mapping
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field").field("type", "string").field("term_vector", "with_positions_offsets_payloads")
+ .field("analyzer", "payload_test").endObject().endObject().endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
+ ImmutableSettings.settingsBuilder().put("index.analysis.analyzer.payload_test.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_test.filter", "my_delimited_payload_filter")
+ .put("index.analysis.filter.my_delimited_payload_filter.delimiter", delimiter)
+ .put("index.analysis.filter.my_delimited_payload_filter.encoding", encodingString)
+ .put("index.analysis.filter.my_delimited_payload_filter.type", "delimited_payload_filter")));
+ ensureYellow();
+
+ client().prepareIndex("test", "type1", Integer.toString(1))
+ .setSource(XContentFactory.jsonBuilder().startObject().field("field", queryString).endObject()).execute().actionGet();
+ refresh();
+ TermVectorRequestBuilder resp = client().prepareTermVector("test", "type1", Integer.toString(1)).setPayloads(true).setOffsets(true)
+ .setPositions(true).setSelectedFields();
+ TermVectorResponse response = resp.execute().actionGet();
+ assertThat("doc id 1 doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(1));
+ Terms terms = fields.terms("field");
+ TermsEnum iterator = terms.iterator(null);
+ while (iterator.next() != null) {
+ String term = iterator.term().utf8ToString();
+ DocsAndPositionsEnum docsAndPositions = iterator.docsAndPositions(null, null);
+ assertNotNull(docsAndPositions);
+ assertThat(docsAndPositions.nextDoc(), equalTo(0));
+ List<BytesRef> curPayloads = payloads.get(term);
+ assertThat(term, curPayloads, Matchers.notNullValue());
+ for (int k = 0; k < docsAndPositions.freq(); k++) {
+ docsAndPositions.nextPosition();
+ if (docsAndPositions.getPayload() != null) {
+ String infoString = "\nterm: " + term + " has payload \n" + docsAndPositions.getPayload().toString()
+ + "\n but should have payload \n" + curPayloads.get(k).toString();
+ assertThat(infoString, docsAndPositions.getPayload(), equalTo(curPayloads.get(k)));
+ } else {
+ String infoString = "\nterm: " + term + " has no payload but should have payload \n" + curPayloads.get(k).toString();
+ assertThat(infoString, curPayloads.get(k).length, equalTo(0));
+ }
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+ }
+
+ private String createRandomDelimiter(String[] tokens) {
+ String delimiter = "";
+ boolean isTokenOrWhitespace = true;
+ while (isTokenOrWhitespace) {
+ isTokenOrWhitespace = false;
+ delimiter = randomUnicodeOfLength(1);
+ for (String token : tokens) {
+ if (token.contains(delimiter)) {
+ isTokenOrWhitespace = true;
+ }
+ }
+ if (Character.isWhitespace(delimiter.charAt(0))) {
+ isTokenOrWhitespace = true;
+ }
+ }
+ return delimiter;
+ }
+
+ private String createString(String[] tokens, Map<String, List<BytesRef>> payloads, int encoding, char delimiter) {
+ StringBuilder result = new StringBuilder();
+ ObjectIntOpenHashMap<String> payloadCounter = new ObjectIntOpenHashMap<String>();
+ for (String token : tokens) {
+ if (!payloadCounter.containsKey(token)) {
+ payloadCounter.putIfAbsent(token, 0);
+ } else {
+ payloadCounter.put(token, payloadCounter.get(token) + 1);
+ }
+ result.append(token);
+ BytesRef payload = payloads.get(token).get(payloadCounter.get(token));
+ if (payload.length > 0) {
+ result.append(delimiter);
+ switch (encoding) {
+ case 0: {
+ result.append(Float.toString(PayloadHelper.decodeFloat(payload.bytes, payload.offset)));
+ break;
+ }
+ case 1: {
+ result.append(Integer.toString(PayloadHelper.decodeInt(payload.bytes, payload.offset)));
+ break;
+ }
+ case 2: {
+ result.append(payload.utf8ToString());
+ break;
+ }
+ default: {
+ throw new ElasticsearchException("unsupported encoding type");
+ }
+ }
+ }
+ result.append(' ');
+ }
+ return result.toString();
+ }
+
+ private Map<String, List<BytesRef>> createPayloads(String[] tokens, int encoding) {
+ Map<String, List<BytesRef>> payloads = new HashMap<String, List<BytesRef>>();
+ for (String token : tokens) {
+ if (payloads.get(token) == null) {
+ payloads.put(token, new ArrayList<BytesRef>());
+ }
+ boolean createPayload = randomBoolean();
+ if (createPayload) {
+ switch (encoding) {
+ case 0: {
+ float theFloat = randomFloat();
+ payloads.get(token).add(new BytesRef(PayloadHelper.encodeFloat(theFloat)));
+ break;
+ }
+ case 1: {
+ payloads.get(token).add(new BytesRef(PayloadHelper.encodeInt(randomInt())));
+ break;
+ }
+ case 2: {
+ String payload = randomUnicodeOfLengthBetween(50, 100);
+ for (int c = 0; c < payload.length(); c++) {
+ if (Character.isWhitespace(payload.charAt(c))) {
+ payload = payload.replace(payload.charAt(c), 'w');
+ }
+ }
+ payloads.get(token).add(new BytesRef(payload));
+ break;
+ }
+ default: {
+ throw new ElasticsearchException("unsupported encoding type");
+ }
+ }
+ } else {
+ payloads.get(token).add(new BytesRef());
+ }
+ }
+ return payloads;
+ }
+
+ private String[] createRandomTokens() {
+ String[] tokens = { "the", "quick", "brown", "fox" };
+ int numTokensWithDuplicates = randomIntBetween(3, 15);
+ String[] finalTokens = new String[numTokensWithDuplicates];
+ for (int i = 0; i < numTokensWithDuplicates; i++) {
+ finalTokens[i] = tokens[randomIntBetween(0, tokens.length - 1)];
+ }
+ return finalTokens;
+ }
+}
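
With the delimited_payload_filter settings above, each indexed token carries its
payload inline after the delimiter, so with delimiter '|' and float encoding a
source string like "the|0.1 quick|0.2 " yields the tokens "the" and "quick"
with float payloads 0.1 and 0.2. A small standalone sketch of the encode/decode
round trip that createString() and createPayloads() rely on (only the Lucene
PayloadHelper already imported above is used):

    import org.apache.lucene.analysis.payloads.PayloadHelper;
    import org.apache.lucene.util.BytesRef;

    public class DelimitedPayloadSketch {
        public static void main(String[] args) {
            // float-encode a payload exactly as createPayloads() does for encoding 0
            BytesRef payload = new BytesRef(PayloadHelper.encodeFloat(0.1f));
            // decode it back, as createString() does when rebuilding the source text
            float decoded = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
            System.out.println("the" + '|' + Float.toString(decoded) + " "); // the|0.1
        }
    }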
diff --git a/src/test/java/org/elasticsearch/action/termvector/MultiTermVectorsTests.java b/src/test/java/org/elasticsearch/action/termvector/MultiTermVectorsTests.java
new file mode 100644
index 0000000..8ce9a95
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/MultiTermVectorsTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.Fields;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class MultiTermVectorsTests extends AbstractTermVectorTests {
+
+ @Test
+ public void testDuelESLucene() throws Exception {
+ TestFieldSetting[] testFieldSettings = getFieldSettings();
+ createIndexBasedOnFieldSettings(testFieldSettings, -1);
+ TestDoc[] testDocs = generateTestDocs(5, testFieldSettings);
+
+ DirectoryReader directoryReader = indexDocsWithLucene(testDocs);
+ TestConfig[] testConfigs = generateTestConfigs(20, testDocs, testFieldSettings);
+
+ MultiTermVectorsRequestBuilder requestBuilder = client().prepareMultiTermVectors();
+ for (TestConfig test : testConfigs) {
+ requestBuilder.add(getRequestForConfig(test).request());
+ }
+
+ MultiTermVectorsItemResponse[] responseItems = requestBuilder.get().getResponses();
+
+ for (int i = 0; i < testConfigs.length; i++) {
+ TestConfig test = testConfigs[i];
+ try {
+ MultiTermVectorsItemResponse item = responseItems[i];
+ if (test.expectedException != null) {
+ assertTrue(item.isFailed());
+ continue;
+ } else if (item.isFailed()) {
+ fail(item.getFailure().getMessage());
+ }
+ Fields luceneTermVectors = getTermVectorsFromLucene(directoryReader, test.doc);
+ validateResponse(item.getResponse(), luceneTermVectors, test);
+ } catch (Throwable t) {
+ throw new Exception("Test exception while running " + test.toString(), t);
+ }
+ }
+
+ }
+
+ @Test
+ public void testMissingIndexThrowsMissingIndex() throws Exception {
+ TermVectorRequestBuilder requestBuilder = client().prepareTermVector("testX", "typeX", Integer.toString(1));
+ MultiTermVectorsRequestBuilder mtvBuilder = new MultiTermVectorsRequestBuilder(client());
+ mtvBuilder.add(requestBuilder.request());
+ MultiTermVectorsResponse response = mtvBuilder.execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(1));
+ assertThat(response.getResponses()[0].getFailure().getMessage(), equalTo("[" + response.getResponses()[0].getIndex() + "] missing"));
+ }
+}
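
In outline, the round trip exercised above is: fold many single term vector
requests into one multi request, then walk the per-item responses, where
failures such as a missing index are reported per item rather than thrown. A
condensed sketch, assuming the same test base class so that client() is
available (fetchTermVectors is a hypothetical helper name):

    // gather several term vector requests and walk the per-item responses
    void fetchTermVectors(String... ids) {
        MultiTermVectorsRequestBuilder builder = client().prepareMultiTermVectors();
        for (String id : ids) {
            builder.add(new TermVectorRequest("test", "type1", id));
        }
        for (MultiTermVectorsItemResponse item : builder.get().getResponses()) {
            if (item.isFailed()) {
                continue; // e.g. "[index] missing" surfaces here, not as an exception
            }
            TermVectorResponse response = item.getResponse();
            // inspect response.getFields() as validateResponse(...) does above
        }
    }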
diff --git a/src/test/java/org/elasticsearch/action/termvector/TermVectorUnitTests.java b/src/test/java/org/elasticsearch/action/termvector/TermVectorUnitTests.java
new file mode 100644
index 0000000..36ee3cb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/TermVectorUnitTests.java
@@ -0,0 +1,328 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvector;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.action.termvector.TermVectorRequest.Flag;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.io.stream.InputStreamStreamInput;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.mapper.core.TypeParsers;
+import org.elasticsearch.index.mapper.internal.AllFieldMapper;
+import org.elasticsearch.rest.action.termvector.RestTermVectorAction;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class TermVectorUnitTests extends ElasticsearchLuceneTestCase {
+
+ @Test
+ public void streamResponse() throws Exception {
+
+ TermVectorResponse outResponse = new TermVectorResponse("a", "b", "c");
+ outResponse.setExists(true);
+ writeStandardTermVector(outResponse);
+
+ // write
+ ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+ OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
+ outResponse.writeTo(out);
+
+ // read
+ ByteArrayInputStream esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ InputStreamStreamInput esBuffer = new InputStreamStreamInput(esInBuffer);
+ TermVectorResponse inResponse = new TermVectorResponse("a", "b", "c");
+ inResponse.readFrom(esBuffer);
+
+ // see if correct
+ checkIfStandardTermVector(inResponse);
+
+ outResponse = new TermVectorResponse("a", "b", "c");
+ writeEmptyTermVector(outResponse);
+ // write
+ outBuffer = new ByteArrayOutputStream();
+ out = new OutputStreamStreamOutput(outBuffer);
+ outResponse.writeTo(out);
+
+ // read
+ esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ esBuffer = new InputStreamStreamInput(esInBuffer);
+ inResponse = new TermVectorResponse("a", "b", "c");
+ inResponse.readFrom(esBuffer);
+ assertTrue(inResponse.isExists());
+
+ }
+
+ private void writeEmptyTermVector(TermVectorResponse outResponse) throws IOException {
+
+ Directory dir = newDirectory();
+ IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT));
+ conf.setOpenMode(OpenMode.CREATE);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ // no field with stored term vectors is indexed, so the response stays empty
+ Document d = new Document();
+ d.add(new Field("id", "abc", StringField.TYPE_STORED));
+
+ writer.updateDocument(new Term("id", "abc"), d);
+ writer.commit();
+ writer.close();
+ DirectoryReader dr = DirectoryReader.open(dir);
+ IndexSearcher s = new IndexSearcher(dr);
+ TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
+ ScoreDoc[] scoreDocs = search.scoreDocs;
+ int doc = scoreDocs[0].doc;
+ Fields fields = dr.getTermVectors(doc);
+ EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
+ outResponse.setFields(fields, null, flags, fields);
+ outResponse.setExists(true);
+ dr.close();
+ dir.close();
+
+ }
+
+ private void writeStandardTermVector(TermVectorResponse outResponse) throws IOException {
+
+ Directory dir = newDirectory();
+ IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT));
+
+ conf.setOpenMode(OpenMode.CREATE);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ FieldType type = new FieldType(TextField.TYPE_STORED);
+ type.setStoreTermVectorOffsets(true);
+ type.setStoreTermVectorPayloads(false);
+ type.setStoreTermVectorPositions(true);
+ type.setStoreTermVectors(true);
+ type.freeze();
+ Document d = new Document();
+ d.add(new Field("id", "abc", StringField.TYPE_STORED));
+ d.add(new Field("title", "the1 quick brown fox jumps over the1 lazy dog", type));
+ d.add(new Field("desc", "the1 quick brown fox jumps over the1 lazy dog", type));
+
+ writer.updateDocument(new Term("id", "abc"), d);
+ writer.commit();
+ writer.close();
+ DirectoryReader dr = DirectoryReader.open(dir);
+ IndexSearcher s = new IndexSearcher(dr);
+ TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
+ ScoreDoc[] scoreDocs = search.scoreDocs;
+ int doc = scoreDocs[0].doc;
+ Fields termVectors = dr.getTermVectors(doc);
+ EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
+ outResponse.setFields(termVectors, null, flags, termVectors);
+ dr.close();
+ dir.close();
+
+ }
+
+ private void checkIfStandardTermVector(TermVectorResponse inResponse) throws IOException {
+
+ Fields fields = inResponse.getFields();
+ assertThat(fields.terms("title"), Matchers.notNullValue());
+ assertThat(fields.terms("desc"), Matchers.notNullValue());
+ assertThat(fields.size(), equalTo(2));
+ }
+
+ @Test
+ public void testRestRequestParsing() throws Exception {
+ BytesReference inputBytes = new BytesArray(
+ " {\"fields\" : [\"a\", \"b\",\"c\"], \"offsets\":false, \"positions\":false, \"payloads\":true}");
+
+ TermVectorRequest tvr = new TermVectorRequest(null, null, null);
+ XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(inputBytes);
+ TermVectorRequest.parseRequest(tvr, parser);
+
+ Set<String> fields = tvr.selectedFields();
+ assertThat(fields.contains("a"), equalTo(true));
+ assertThat(fields.contains("b"), equalTo(true));
+ assertThat(fields.contains("c"), equalTo(true));
+ assertThat(tvr.offsets(), equalTo(false));
+ assertThat(tvr.positions(), equalTo(false));
+ assertThat(tvr.payloads(), equalTo(true));
+ String additionalFields = "b,c ,d, e ";
+ RestTermVectorAction.addFieldStringsFromParameter(tvr, additionalFields);
+ assertThat(tvr.selectedFields().size(), equalTo(5));
+ assertThat(fields.contains("d"), equalTo(true));
+ assertThat(fields.contains("e"), equalTo(true));
+
+ additionalFields = "";
+ RestTermVectorAction.addFieldStringsFromParameter(tvr, additionalFields);
+
+ inputBytes = new BytesArray(" {\"offsets\":false, \"positions\":false, \"payloads\":true}");
+ tvr = new TermVectorRequest(null, null, null);
+ parser = XContentFactory.xContent(XContentType.JSON).createParser(inputBytes);
+ TermVectorRequest.parseRequest(tvr, parser);
+ additionalFields = "";
+ RestTermVectorAction.addFieldStringsFromParameter(tvr, additionalFields);
+ assertThat(tvr.selectedFields(), equalTo(null));
+ additionalFields = "b,c ,d, e ";
+ RestTermVectorAction.addFieldStringsFromParameter(tvr, additionalFields);
+ assertThat(tvr.selectedFields().size(), equalTo(4));
+
+ }
+
+ @Test
+ public void testRequestParsingThrowsException() throws Exception {
+ BytesReference inputBytes = new BytesArray(
+ " {\"fields\" : \"a, b,c \", \"offsets\":false, \"positions\":false, \"payloads\":true, \"meaningless_term\":2}");
+ TermVectorRequest tvr = new TermVectorRequest(null, null, null);
+ boolean threwException = false;
+ try {
+ XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(inputBytes);
+ TermVectorRequest.parseRequest(tvr, parser);
+ } catch (Exception e) {
+ threwException = true;
+ }
+ assertThat(threwException, equalTo(true));
+
+ }
+
+ @Test
+ public void streamRequest() throws IOException {
+
+ for (int i = 0; i < 10; i++) {
+ TermVectorRequest request = new TermVectorRequest("index", "type", "id");
+ request.offsets(random().nextBoolean());
+ request.fieldStatistics(random().nextBoolean());
+ request.payloads(random().nextBoolean());
+ request.positions(random().nextBoolean());
+ request.termStatistics(random().nextBoolean());
+ String parent = random().nextBoolean() ? "someParent" : null;
+ request.parent(parent);
+ String pref = random().nextBoolean() ? "somePreference" : null;
+ request.preference(pref);
+
+ // write
+ ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+ OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
+ request.writeTo(out);
+
+ // read
+ ByteArrayInputStream esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ InputStreamStreamInput esBuffer = new InputStreamStreamInput(esInBuffer);
+ TermVectorRequest req2 = new TermVectorRequest(null, null, null);
+ req2.readFrom(esBuffer);
+
+ assertThat(request.offsets(), equalTo(req2.offsets()));
+ assertThat(request.fieldStatistics(), equalTo(req2.fieldStatistics()));
+ assertThat(request.payloads(), equalTo(req2.payloads()));
+ assertThat(request.positions(), equalTo(req2.positions()));
+ assertThat(request.termStatistics(), equalTo(req2.termStatistics()));
+ assertThat(req2.preference(), equalTo(pref));
+ // the parent id doubles as the routing value when no explicit routing is set
+ assertThat(req2.routing(), equalTo(parent));
+
+ }
+ }
+
+ @Test
+ public void testFieldTypeToTermVectorString() throws Exception {
+ FieldType ft = new FieldType();
+ ft.setStoreTermVectorOffsets(false);
+ ft.setStoreTermVectorPayloads(true);
+ ft.setStoreTermVectors(true);
+ ft.setStoreTermVectorPositions(true);
+ String ftOpts = AbstractFieldMapper.termVectorOptionsToString(ft);
+ assertThat("with_positions_payloads", equalTo(ftOpts));
+ AllFieldMapper.Builder builder = new AllFieldMapper.Builder();
+ boolean exceptionThrown = false;
+ try {
+ TypeParsers.parseTermVector("", ftOpts, builder);
+ } catch (MapperParsingException e) {
+ exceptionThrown = true;
+ }
+ assertThat("TypeParsers.parseTermVector should accept the string with_positions_payloads but does not.", exceptionThrown, equalTo(false));
+ }
+
+ @Test
+ public void testTermVectorStringGenerationWithoutPositions() throws Exception {
+ FieldType ft = new FieldType();
+ ft.setStoreTermVectorOffsets(true);
+ ft.setStoreTermVectorPayloads(true);
+ ft.setStoreTermVectors(true);
+ ft.setStoreTermVectorPositions(false);
+ String ftOpts = AbstractFieldMapper.termVectorOptionsToString(ft);
+ assertThat(ftOpts, equalTo("with_offsets"));
+ }
+
+ @Test
+ public void testMultiParser() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/termvector/multiRequest1.json");
+ BytesReference bytes = new BytesArray(data);
+ MultiTermVectorsRequest request = new MultiTermVectorsRequest();
+ request.add(new TermVectorRequest(), bytes);
+ checkParsedParameters(request);
+
+ data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/termvector/multiRequest2.json");
+ bytes = new BytesArray(data);
+ request = new MultiTermVectorsRequest();
+ request.add(new TermVectorRequest(), bytes);
+ checkParsedParameters(request);
+
+ }
+
+ void checkParsedParameters(MultiTermVectorsRequest request) {
+ Set<String> ids = new HashSet<String>();
+ ids.add("1");
+ ids.add("2");
+ Set<String> fields = new HashSet<String>();
+ fields.add("a");
+ fields.add("b");
+ fields.add("c");
+ for (TermVectorRequest singleRequest : request.requests) {
+ assertThat(singleRequest.index(), equalTo("testidx"));
+ assertThat(singleRequest.type(), equalTo("test"));
+ assertThat(singleRequest.payloads(), equalTo(false));
+ assertThat(singleRequest.positions(), equalTo(false));
+ assertThat(singleRequest.offsets(), equalTo(false));
+ assertThat(singleRequest.termStatistics(), equalTo(true));
+ assertThat(singleRequest.fieldStatistics(), equalTo(false));
+ assertThat(singleRequest.id(), Matchers.anyOf(Matchers.equalTo("1"), Matchers.equalTo("2")));
+ assertThat(singleRequest.selectedFields(), equalTo(fields));
+ }
+ }
+
+}
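
The request body that TermVectorRequest.parseRequest(...) accepts is plain JSON
with one boolean per flag and an optional fields array, as the literals above
show. A condensed sketch of the parse step, mirroring testRestRequestParsing
(exception handling omitted; createParser and parseRequest throw IOException):

    BytesReference body = new BytesArray(
            "{\"fields\":[\"a\",\"b\",\"c\"],\"offsets\":false,\"positions\":false,\"payloads\":true}");
    TermVectorRequest request = new TermVectorRequest("index", "type", "id");
    XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(body);
    TermVectorRequest.parseRequest(request, parser);
    // request.selectedFields() now holds a, b and c; offsets and positions are off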
diff --git a/src/test/java/org/elasticsearch/action/termvector/multiRequest1.json b/src/test/java/org/elasticsearch/action/termvector/multiRequest1.json
new file mode 100644
index 0000000..fcb5e3a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/multiRequest1.json
@@ -0,0 +1,13 @@
+{
+ "ids": ["1","2"],
+ "parameters": {
+ "field_statistics": false,
+ "term_statistics": true,
+ "payloads":false,
+ "offsets":false,
+ "positions":false,
+ "fields":["a","b","c"],
+ "_index": "testidx",
+ "_type":"test"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/action/termvector/multiRequest2.json b/src/test/java/org/elasticsearch/action/termvector/multiRequest2.json
new file mode 100644
index 0000000..a0709ef
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/termvector/multiRequest2.json
@@ -0,0 +1,26 @@
+{
+ "docs": [
+ {
+ "_id": "1",
+ "field_statistics": false,
+ "term_statistics": true,
+ "payloads": false,
+ "offsets": false,
+ "positions": false,
+ "fields":["a","b","c"],
+ "_index": "testidx",
+ "_type": "test"
+ },
+ {
+ "_id": "2",
+ "field_statistics": false,
+ "term_statistics": true,
+ "payloads": false,
+ "offsets": false,
+ "positions": false,
+ "fields":["a","b","c"],
+ "_index": "testidx",
+ "_type": "test"
+ }
+ ]
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java
new file mode 100644
index 0000000..3b7cd27
--- /dev/null
+++ b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java
@@ -0,0 +1,834 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.aliases;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.AliasAction;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesMissingException;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.facet.FacetBuilders;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.client.Requests.indexRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.test.hamcrest.CollectionAssertions.hasKey;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class IndexAliasesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testAliases() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ logger.info("--> aliasing index [test] with [alias1]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1"));
+
+ logger.info("--> indexing against [alias1], should work now");
+ IndexResponse indexResponse = client().index(indexRequest("alias1").type("type1").id("1").source(source("1", "test"))).actionGet();
+ assertThat(indexResponse.getIndex(), equalTo("test"));
+
+ logger.info("--> creating index [test_x]");
+ createIndex("test_x");
+
+ ensureGreen();
+
+ logger.info("--> remove [alias1], Aliasing index [test_x] with [alias1]");
+ assertAcked(admin().indices().prepareAliases().removeAlias("test", "alias1").addAlias("test_x", "alias1"));
+
+ logger.info("--> indexing against [alias1], should work against [test_x]");
+ indexResponse = client().index(indexRequest("alias1").type("type1").id("1").source(source("1", "test"))).actionGet();
+ assertThat(indexResponse.getIndex(), equalTo("test_x"));
+ }
+
+ @Test
+ public void testFailedFilter() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ try {
+ logger.info("--> aliasing index [test] with [alias1] and filter [t]");
+ admin().indices().prepareAliases().addAlias("test", "alias1", "{ t }").get();
+ fail("adding an alias with a malformed filter should have failed");
+ } catch (Exception e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testFilteringAliases() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ logger.info("--> aliasing index [test] with [alias1] and filter [user:kimchy]");
+ FilterBuilder filter = termFilter("user", "kimchy");
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1", filter));
+
+ // For now just making sure that filter was stored with the alias
+ logger.info("--> making sure that filter was stored with alias [alias1] and filter [user:kimchy]");
+ ClusterState clusterState = admin().cluster().prepareState().get().getState();
+ IndexMetaData indexMd = clusterState.metaData().index("test");
+ assertThat(indexMd.aliases().get("alias1").filter().string(), equalTo("{\"term\":{\"user\":\"kimchy\"}}"));
+
+ }
+
+ @Test
+ public void testEmptyFilter() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+ ensureGreen();
+
+ logger.info("--> aliasing index [test] with [alias1] and empty filter");
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1", "{}"));
+ }
+
+ @Test
+ public void testSearchingFilteringAliasesSingleIndex() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ logger.info("--> adding filtering aliases to index [test]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias2"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "foos", termFilter("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "bars", termFilter("name", "bar")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "tests", termFilter("name", "test")));
+
+ logger.info("--> indexing against [test]");
+ client().index(indexRequest("test").type("type1").id("1").source(source("1", "foo test")).refresh(true)).actionGet();
+ client().index(indexRequest("test").type("type1").id("2").source(source("2", "bar test")).refresh(true)).actionGet();
+ client().index(indexRequest("test").type("type1").id("3").source(source("3", "baz test")).refresh(true)).actionGet();
+ client().index(indexRequest("test").type("type1").id("4").source(source("4", "something else")).refresh(true)).actionGet();
+
+ logger.info("--> checking single filtering alias search");
+ SearchResponse searchResponse = client().prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1");
+
+ logger.info("--> checking single filtering alias wildcard search");
+ searchResponse = client().prepareSearch("fo*").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1");
+
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3");
+
+ logger.info("--> checking single filtering alias search with sort");
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).addSort("_uid", SortOrder.ASC).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3");
+
+ logger.info("--> checking single filtering alias search with global facets");
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar"))
+ .addFacet(FacetBuilders.termsFacet("test").field("name").global(true))
+ .get();
+ assertThat(((TermsFacet) searchResponse.getFacets().facet("test")).getEntries().size(), equalTo(4));
+
+ logger.info("--> checking single filtering alias search with global facets and sort");
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar"))
+ .addFacet(FacetBuilders.termsFacet("test").field("name").global(true))
+ .addSort("_uid", SortOrder.ASC).get();
+ assertThat(((TermsFacet) searchResponse.getFacets().facet("test")).getEntries().size(), equalTo(4));
+
+ logger.info("--> checking single filtering alias search with non-global facets");
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar"))
+ .addFacet(FacetBuilders.termsFacet("test").field("name").global(false))
+ .addSort("_uid", SortOrder.ASC).get();
+ assertThat(((TermsFacet) searchResponse.getFacets().facet("test")).getEntries().size(), equalTo(2));
+
+ searchResponse = client().prepareSearch("foos", "bars").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2");
+
+ logger.info("--> checking single non-filtering alias search");
+ searchResponse = client().prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4");
+
+ logger.info("--> checking non-filtering alias and filtering alias search");
+ searchResponse = client().prepareSearch("alias1", "foos").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4");
+
+ logger.info("--> checking index and filtering alias search");
+ searchResponse = client().prepareSearch("test", "foos").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4");
+
+ logger.info("--> checking index and alias wildcard search");
+ searchResponse = client().prepareSearch("te*", "fo*").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4");
+ }
+
+ @Test
+ public void testSearchingFilteringAliasesTwoIndices() throws Exception {
+ logger.info("--> creating index [test1]");
+ createIndex("test1");
+
+ logger.info("--> creating index [test2]");
+ createIndex("test2");
+
+ ensureGreen();
+
+ logger.info("--> adding filtering aliases to index [test1]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTest1"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTests"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "foos", termFilter("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "bars", termFilter("name", "bar")));
+
+ logger.info("--> adding filtering aliases to index [test2]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTest2"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTests"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "foos", termFilter("name", "foo")));
+
+ logger.info("--> indexing against [test1]");
+ client().index(indexRequest("test1").type("type1").id("1").source(source("1", "foo test"))).get();
+ client().index(indexRequest("test1").type("type1").id("2").source(source("2", "bar test"))).get();
+ client().index(indexRequest("test1").type("type1").id("3").source(source("3", "baz test"))).get();
+ client().index(indexRequest("test1").type("type1").id("4").source(source("4", "something else"))).get();
+
+ logger.info("--> indexing against [test2]");
+ client().index(indexRequest("test2").type("type1").id("5").source(source("5", "foo test"))).get();
+ client().index(indexRequest("test2").type("type1").id("6").source(source("6", "bar test"))).get();
+ client().index(indexRequest("test2").type("type1").id("7").source(source("7", "baz test"))).get();
+ client().index(indexRequest("test2").type("type1").id("8").source(source("8", "something else"))).get();
+
+ refresh();
+
+ logger.info("--> checking filtering alias for two indices");
+ SearchResponse searchResponse = client().prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "5");
+ assertThat(client().prepareCount("foos").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(2L));
+
+ logger.info("--> checking filtering alias for one index");
+ searchResponse = client().prepareSearch("bars").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "2");
+ assertThat(client().prepareCount("bars").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(1L));
+
+ logger.info("--> checking filtering alias for two indices and one complete index");
+ searchResponse = client().prepareSearch("foos", "test1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5");
+ assertThat(client().prepareCount("foos", "test1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(5L));
+
+ logger.info("--> checking filtering alias for two indices and non-filtering alias for one index");
+ searchResponse = client().prepareSearch("foos", "aliasToTest1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5");
+ assertThat(client().prepareCount("foos", "aliasToTest1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(5L));
+
+ logger.info("--> checking filtering alias for two indices and non-filtering alias for both indices");
+ searchResponse = client().prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(8L));
+ assertThat(client().prepareCount("foos", "aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(8L));
+
+ logger.info("--> checking filtering alias for two indices and non-filtering alias for both indices");
+ searchResponse = client().prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.termQuery("name", "something")).get();
+ assertHits(searchResponse.getHits(), "4", "8");
+ assertThat(client().prepareCount("foos", "aliasToTests").setQuery(QueryBuilders.termQuery("name", "something")).get().getCount(), equalTo(2L));
+ }
+
+ @Test
+ public void testSearchingFilteringAliasesMultipleIndices() throws Exception {
+ logger.info("--> creating indices");
+ createIndex("test1", "test2", "test3");
+
+ ensureGreen();
+
+ logger.info("--> adding aliases to indices");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "alias12"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "alias12"));
+
+ logger.info("--> adding filtering aliases to indices");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "filter1", termFilter("name", "test1")));
+
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "filter23", termFilter("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test3", "filter23", termFilter("name", "foo")));
+
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "filter13", termFilter("name", "baz")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test3", "filter13", termFilter("name", "baz")));
+
+ logger.info("--> indexing against [test1]");
+ client().index(indexRequest("test1").type("type1").id("11").source(source("11", "foo test1"))).get();
+ client().index(indexRequest("test1").type("type1").id("12").source(source("12", "bar test1"))).get();
+ client().index(indexRequest("test1").type("type1").id("13").source(source("13", "baz test1"))).get();
+
+ client().index(indexRequest("test2").type("type1").id("21").source(source("21", "foo test2"))).get();
+ client().index(indexRequest("test2").type("type1").id("22").source(source("22", "bar test2"))).get();
+ client().index(indexRequest("test2").type("type1").id("23").source(source("23", "baz test2"))).get();
+
+ client().index(indexRequest("test3").type("type1").id("31").source(source("31", "foo test3"))).get();
+ client().index(indexRequest("test3").type("type1").id("32").source(source("32", "bar test3"))).get();
+ client().index(indexRequest("test3").type("type1").id("33").source(source("33", "baz test3"))).get();
+
+ refresh();
+
+ logger.info("--> checking filtering alias for multiple indices");
+ SearchResponse searchResponse = client().prepareSearch("filter23", "filter13").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "21", "31", "13", "33");
+ assertThat(client().prepareCount("filter23", "filter13").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(4L));
+
+ searchResponse = client().prepareSearch("filter23", "filter1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "21", "31", "11", "12", "13");
+ assertThat(client().prepareCount("filter23", "filter1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(5L));
+
+ searchResponse = client().prepareSearch("filter13", "filter1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "11", "12", "13", "33");
+ assertThat(client().prepareCount("filter13", "filter1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(4L));
+
+ searchResponse = client().prepareSearch("filter13", "filter1", "filter23").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "11", "12", "13", "21", "31", "33");
+ assertThat(client().prepareCount("filter13", "filter1", "filter23").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(6L));
+
+ searchResponse = client().prepareSearch("filter23", "filter13", "test2").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "21", "22", "23", "31", "13", "33");
+ assertThat(client().prepareCount("filter23", "filter13", "test2").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(6L));
+
+ searchResponse = client().prepareSearch("filter23", "filter13", "test1", "test2").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "11", "12", "13", "21", "22", "23", "31", "33");
+ assertThat(client().prepareCount("filter23", "filter13", "test1", "test2").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(8L));
+ }
+
+ @Test
+ public void testDeletingByQueryFilteringAliases() throws Exception {
+ logger.info("--> creating index [test1] and [test2]");
+ createIndex("test1", "test2");
+
+ ensureGreen();
+
+ logger.info("--> adding filtering aliases to index [test1]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTest1"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTests"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "foos", termFilter("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "bars", termFilter("name", "bar")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "tests", termFilter("name", "test")));
+
+ logger.info("--> adding filtering aliases to index [test2]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTest2"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTests"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "foos", termFilter("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "tests", termFilter("name", "test")));
+
+ logger.info("--> indexing against [test1]");
+ client().index(indexRequest("test1").type("type1").id("1").source(source("1", "foo test"))).get();
+ client().index(indexRequest("test1").type("type1").id("2").source(source("2", "bar test"))).get();
+ client().index(indexRequest("test1").type("type1").id("3").source(source("3", "baz test"))).get();
+ client().index(indexRequest("test1").type("type1").id("4").source(source("4", "something else"))).get();
+
+ logger.info("--> indexing against [test2]");
+ client().index(indexRequest("test2").type("type1").id("5").source(source("5", "foo test"))).get();
+ client().index(indexRequest("test2").type("type1").id("6").source(source("6", "bar test"))).get();
+ client().index(indexRequest("test2").type("type1").id("7").source(source("7", "baz test"))).get();
+ client().index(indexRequest("test2").type("type1").id("8").source(source("8", "something else"))).get();
+
+ refresh();
+
+ logger.info("--> checking counts before delete");
+ assertThat(client().prepareCount("bars").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(1L));
+
+ logger.info("--> delete by query from a single alias");
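+ // the "bars" alias filter (name:bar) is combined with the query, so only doc "2" is deleted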
+ client().prepareDeleteByQuery("bars").setQuery(QueryBuilders.termQuery("name", "test")).get();
+
+ logger.info("--> verify that only one record was deleted");
+ assertThat(client().prepareCount("test1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(3L));
+
+ logger.info("--> delete by query from an alias pointing to two indices");
+ client().prepareDeleteByQuery("foos").setQuery(QueryBuilders.matchAllQuery()).get();
+
+ logger.info("--> verify that proper records were deleted");
+ SearchResponse searchResponse = client().prepareSearch("aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "3", "4", "6", "7", "8");
+
+ logger.info("--> delete by query from an alias and an index");
+ client().prepareDeleteByQuery("tests", "test2").setQuery(QueryBuilders.matchAllQuery()).get();
+
+ logger.info("--> verify that proper records were deleted");
+ searchResponse = client().prepareSearch("aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "4");
+ }
+
+ @Test
+ public void testDeleteAliases() throws Exception {
+ logger.info("--> creating index [test1] and [test2]");
+ createIndex("test1", "test2");
+
+ ensureGreen();
+
+ logger.info("--> adding filtering aliases to index [test1]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTest1")
+ .addAlias("test1", "aliasToTests")
+ .addAlias("test1", "foos", termFilter("name", "foo"))
+ .addAlias("test1", "bars", termFilter("name", "bar"))
+ .addAlias("test1", "tests", termFilter("name", "test")));
+
+ logger.info("--> adding filtering aliases to index [test2]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTest2")
+ .addAlias("test2", "aliasToTests")
+ .addAlias("test2", "foos", termFilter("name", "foo"))
+ .addAlias("test2", "tests", termFilter("name", "test")));
+
+ String[] indices = {"test1", "test2"};
+ String[] aliases = {"aliasToTest1", "foos", "bars", "tests", "aliasToTest2", "aliasToTests"};
+
+ admin().indices().prepareAliases().removeAlias(indices, aliases).get();
+
+ AliasesExistResponse response = admin().indices().prepareAliasesExist(aliases).get();
+ assertThat(response.exists(), equalTo(false));
+ }
+
+ @Test
+ public void testWaitForAliasCreationMultipleShards() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ for (int i = 0; i < 10; i++) {
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias" + i));
+ client().index(indexRequest("alias" + i).type("type1").id("1").source(source("1", "test"))).get();
+ }
+ }
+
+ @Test
+ public void testWaitForAliasCreationSingleShard() throws Exception {
+ logger.info("--> creating index [test]");
+ assertAcked(admin().indices().create(createIndexRequest("test").settings(settingsBuilder().put("index.numberOfReplicas", 0).put("index.numberOfShards", 1))).get());
+
+ ensureGreen();
+
+ for (int i = 0; i < 10; i++) {
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias" + i));
+ client().index(indexRequest("alias" + i).type("type1").id("1").source(source("1", "test"))).get();
+ }
+ }
+
+ @Test
+ public void testWaitForAliasSimultaneousUpdate() throws Exception {
+ final int aliasCount = 10;
+
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
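+ // add aliases concurrently and index through each one immediately: this fails if an
+ // acknowledged alias is not yet usable for indexing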
+ ExecutorService executor = Executors.newFixedThreadPool(aliasCount);
+ for (int i = 0; i < aliasCount; i++) {
+ final String aliasName = "alias" + i;
+ executor.submit(new Runnable() {
+ @Override
+ public void run() {
+ assertAcked(admin().indices().prepareAliases().addAlias("test", aliasName));
+ client().index(indexRequest(aliasName).type("type1").id("1").source(source("1", "test"))).actionGet();
+ }
+ });
+ }
+ executor.shutdown();
+ boolean done = executor.awaitTermination(10, TimeUnit.SECONDS);
+ if (!done) {
+ executor.shutdownNow();
+ }
+ assertThat(done, equalTo(true));
+ }
+
+ @Test
+ public void testSameAlias() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ logger.info("--> creating alias1 ");
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1")));
+ TimeValue timeout = TimeValue.timeValueSeconds(2);
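+ // re-adding an identical alias should be a no-op that completes well within the timeout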
+ logger.info("--> recreating alias1 ");
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1").setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+
+ logger.info("--> modifying alias1 to have a filter");
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1", termFilter("name", "foo")).setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+
+ logger.info("--> recreating alias1 with the same filter");
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1", termFilter("name", "foo")).setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+
+ logger.info("--> recreating alias1 with a different filter");
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1", termFilter("name", "bar")).setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+
+ logger.info("--> verify that filter was updated");
+ AliasMetaData aliasMetaData = cluster().clusterService().state().metaData().aliases().get("alias1").get("test");
+ assertThat(aliasMetaData.getFilter().toString(), equalTo("{\"term\":{\"name\":\"bar\"}}"));
+
+ logger.info("--> deleting alias1");
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().removeAlias("test", "alias1").setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+ }
+
+ @Test(expected = AliasesMissingException.class)
+ public void testIndicesRemoveNonExistingAliasResponds404() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+ ensureGreen();
+ logger.info("--> deleting alias1 which does not exist");
+ assertAcked((admin().indices().prepareAliases().removeAlias("test", "alias1")));
+ }
+
+ @Test
+ public void testIndicesGetAliases() throws Exception {
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .build();
+ logger.info("--> creating indices [foobar, test, test123, foobarbaz, bazbar]");
+ assertAcked(prepareCreate("foobar").setSettings(indexSettings));
+ assertAcked(prepareCreate("test").setSettings(indexSettings));
+ assertAcked(prepareCreate("test123").setSettings(indexSettings));
+ assertAcked(prepareCreate("foobarbaz").setSettings(indexSettings));
+ assertAcked(prepareCreate("bazbar").setSettings(indexSettings));
+
+ ensureGreen();
+
+ logger.info("--> creating aliases [alias1, alias2]");
+ assertAcked(admin().indices().prepareAliases().addAlias("foobar", "alias1").addAlias("foobar", "alias2"));
+
+ logger.info("--> getting alias1");
+ GetAliasesResponse getResponse = admin().indices().prepareGetAliases("alias1").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("alias1"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ AliasesExistResponse existsResponse = admin().indices().prepareAliasesExist("alias1").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting all aliases that start with alias*");
+ getResponse = admin().indices().prepareGetAliases("alias*").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("alias2"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(1), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(1).alias(), equalTo("alias1"));
+ assertThat(getResponse.getAliases().get("foobar").get(1).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(1).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(1).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("alias*").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> creating aliases [bar, bac, foo]");
+ assertAcked(admin().indices().prepareAliases()
+ .addAlias("bazbar", "bar")
+ .addAlias("bazbar", "bac", termFilter("field", "value"))
+ .addAlias("foobar", "foo"));
+
+ assertAcked(admin().indices().prepareAliases()
+ .addAliasAction(new AliasAction(AliasAction.Type.ADD, "foobar", "bac").routing("bla")));
+
+ logger.info("--> getting bar and bac for index bazbar");
+ getResponse = admin().indices().prepareGetAliases("bar", "bac").addIndices("bazbar").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("bazbar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).alias(), equalTo("bac"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("term"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("field"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("value"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).alias(), equalTo("bar"));
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("bar", "bac")
+ .addIndices("bazbar").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting *b* for index baz*");
+ getResponse = admin().indices().prepareGetAliases("*b*").addIndices("baz*").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("bazbar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).alias(), equalTo("bac"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("term"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("field"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("value"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).alias(), equalTo("bar"));
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("*b*")
+ .addIndices("baz*").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting b* for index *bar");
+ getResponse = admin().indices().prepareGetAliases("b*").addIndices("*bar").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("bazbar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).alias(), equalTo("bac"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("term"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("field"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("value"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).alias(), equalTo("bar"));
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("bac"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), equalTo("bla"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), equalTo("bla"));
+ existsResponse = admin().indices().prepareAliasesExist("b*")
+ .addIndices("*bar").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting f* for index *bar");
+ getResponse = admin().indices().prepareGetAliases("f*").addIndices("*bar").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("f*")
+ .addIndices("*bar").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ // alias at work
+ logger.info("--> getting foo for index *bac");
+ getResponse = admin().indices().prepareGetAliases("foo").addIndices("*bac").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("foo")
+ .addIndices("*bac").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting foo for index foobar");
+ getResponse = admin().indices().prepareGetAliases("foo").addIndices("foobar").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("foo")
+ .addIndices("foobar").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ // alias at work again
+ logger.info("--> getting * for index *bac");
+ getResponse = admin().indices().prepareGetAliases("*").addIndices("*bac").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("foobar").size(), equalTo(4));
+ assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2));
+ existsResponse = admin().indices().prepareAliasesExist("*")
+ .addIndices("*bac").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ assertAcked(admin().indices().prepareAliases()
+ .removeAlias("foobar", "foo"));
+
+ getResponse = admin().indices().prepareGetAliases("foo").addIndices("foobar").get();
+ assertThat(getResponse.getAliases().isEmpty(), equalTo(true));
+ existsResponse = admin().indices().prepareAliasesExist("foo").addIndices("foobar").get();
+ assertThat(existsResponse.exists(), equalTo(false));
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testAddAliasNullIndex() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction(null, "alias1")).get();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testAddAliasEmptyIndex() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("", "alias1")).get();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testAddAliasNullAlias() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("index1", null)).get();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testAddAliasEmptyAlias() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("index1", "")).get();
+ }
+
+ @Test
+ public void testAddAliasNullAliasNullIndex() {
+ try {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction(null, null)).get();
+ fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors(), notNullValue());
+ assertThat(e.validationErrors().size(), equalTo(1));
+ }
+ }
+
+ @Test
+ public void testAddAliasEmptyAliasEmptyIndex() {
+ try {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("", "")).get();
+ fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors(), notNullValue());
+ assertThat(e.validationErrors().size(), equalTo(2));
+ }
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testRemoveAliasNullIndex() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction(null, "alias1")).get();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testRemoveAliasEmptyIndex() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("", "alias1")).get();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testRemoveAliasNullAlias() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("index1", null)).get();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testRemoveAliasEmptyAlias() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("index1", "")).get();
+ }
+
+ @Test
+ public void testRemoveAliasNullAliasNullIndex() {
+ try {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction(null, null)).get();
+ fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors(), notNullValue());
+ assertThat(e.validationErrors().size(), equalTo(2));
+ }
+ }
+
+ @Test
+ public void testRemoveAliasEmptyAliasEmptyIndex() {
+ try {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("", "")).get();
+ fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors(), notNullValue());
+ assertThat(e.validationErrors().size(), equalTo(2));
+ }
+ }
+
+ @Test
+ public void testGetAllAliasesWorks() {
+ createIndex("index1");
+ createIndex("index2");
+
+ ensureYellow();
+
+ assertAcked(admin().indices().prepareAliases().addAlias("index1", "alias1").addAlias("index2", "alias2"));
+
+ GetAliasesResponse response = admin().indices().prepareGetAliases().get();
+ assertThat(response.getAliases(), hasKey("index1"));
+ assertThat(response.getAliases(), hasKey("index2"));
+ }
+
+ private void assertHits(SearchHits hits, String... ids) {
+ assertThat(hits.totalHits(), equalTo((long) ids.length));
+ Set<String> hitIds = newHashSet();
+ for (SearchHit hit : hits.getHits()) {
+ hitIds.add(hit.id());
+ }
+ assertThat(hitIds, containsInAnyOrder(ids));
+ }
+
+ private String source(String id, String nameValue) {
+ return "{ \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" }";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/aliases/AliasesBenchmark.java b/src/test/java/org/elasticsearch/benchmark/aliases/AliasesBenchmark.java
new file mode 100644
index 0000000..42f9686
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/aliases/AliasesBenchmark.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.aliases;
+
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ */
+public class AliasesBenchmark {
+
+ private final static String INDEX_NAME = "my-index";
+
+ public static void main(String[] args) throws IOException {
+ int NUM_ADDITIONAL_NODES = 0;
+ int BASE_ALIAS_COUNT = 100000;
+ int NUM_ADD_ALIAS_REQUEST = 1000;
+
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("gateway.type", "local")
+ .put("node.master", false).build();
+ Node node1 = NodeBuilder.nodeBuilder().settings(
+ ImmutableSettings.settingsBuilder().put(settings).put("node.master", true)
+ ).node();
+
+ Node[] otherNodes = new Node[NUM_ADDITIONAL_NODES];
+ for (int i = 0; i < otherNodes.length; i++) {
+ otherNodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+
+ Client client = node1.client();
+ try {
+ client.admin().indices().prepareCreate(INDEX_NAME).execute().actionGet();
+ } catch (IndexAlreadyExistsException e) {
+ // the index is left over from a previous run; reuse it
+ }
+ client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet();
+ int numberOfAliases = countAliases(client);
+ System.out.println("Number of aliases: " + numberOfAliases);
+
+ if (numberOfAliases < BASE_ALIAS_COUNT) {
+ int diff = BASE_ALIAS_COUNT - numberOfAliases;
+ System.out.println("Adding " + diff + " more aliases to get to the start amount of " + BASE_ALIAS_COUNT + " aliases");
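+ // batch the additions, flushing every 1000 alias actions so each cluster-state update stays small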
+ IndicesAliasesRequestBuilder builder = client.admin().indices().prepareAliases();
+ for (int i = 1; i <= diff; i++) {
+ builder.addAlias(INDEX_NAME, Strings.randomBase64UUID());
+ if (i % 1000 == 0) {
+ builder.execute().actionGet();
+ builder = client.admin().indices().prepareAliases();
+ }
+ }
+ if (!builder.request().getAliasActions().isEmpty()) {
+ builder.execute().actionGet();
+ }
+ } else if (numberOfAliases > BASE_ALIAS_COUNT) {
+ IndicesAliasesRequestBuilder builder = client.admin().indices().prepareAliases();
+ int diff = numberOfAliases - BASE_ALIAS_COUNT;
+ System.out.println("Removing " + diff + " aliases to get to the start amount of " + BASE_ALIAS_COUNT + " aliases");
+ List<AliasMetaData> aliases = client.admin().indices().prepareGetAliases("*")
+ .addIndices(INDEX_NAME)
+ .execute().actionGet().getAliases().get(INDEX_NAME);
+ for (int i = 0; i < diff; i++) {
+ builder.removeAlias(INDEX_NAME, aliases.get(i).alias());
+ if (i % 1000 == 0) {
+ builder.execute().actionGet();
+ builder = client.admin().indices().prepareAliases();
+ }
+ }
+ if (!builder.request().getAliasActions().isEmpty()) {
+ builder.execute().actionGet();
+ }
+ }
+
+ numberOfAliases = countAliases(client);
+ System.out.println("Number of aliases: " + numberOfAliases);
+
+ long totalTime = 0;
+ int max = numberOfAliases + NUM_ADD_ALIAS_REQUEST;
+ for (int i = numberOfAliases; i < max; i++) {
+ if (i != numberOfAliases && i % 100 == 0) {
+ long avgTime = totalTime / 100;
+ System.out.println("Added [" + (i - numberOfAliases) + "] aliases. Avg create time: " + avgTime + " ms");
+ totalTime = 0;
+ }
+
+ long time = System.currentTimeMillis();
+// String filter = termFilter("field" + i, "value" + i).toXContent(XContentFactory.jsonBuilder(), null).string();
+ client.admin().indices().prepareAliases().addAlias(INDEX_NAME, Strings.randomBase64UUID()/*, filter*/)
+ .execute().actionGet();
+ totalTime += System.currentTimeMillis() - time;
+ }
+ System.out.println("Number of aliases: " + countAliases(client));
+
+ client.close();
+ node1.close();
+ for (Node otherNode : otherNodes) {
+ otherNode.close();
+ }
+ }
+
+ private static int countAliases(Client client) {
+ GetAliasesResponse response = client.admin().indices().prepareGetAliases("*")
+ .addIndices(INDEX_NAME)
+ .execute().actionGet();
+ if (response.getAliases().isEmpty()) {
+ return 0;
+ } else {
+ return response.getAliases().get(INDEX_NAME).size();
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/bloom/BloomBench.java b/src/test/java/org/elasticsearch/benchmark/bloom/BloomBench.java
new file mode 100644
index 0000000..15745fc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/bloom/BloomBench.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.bloom;
+
+import org.apache.lucene.codecs.bloom.FuzzySet;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.util.BloomFilter;
+
+import java.security.SecureRandom;
+
+/**
+ */
+public class BloomBench {
+
+ public static void main(String[] args) throws Exception {
+ SecureRandom random = new SecureRandom();
+ final int ELEMENTS = (int) SizeValue.parseSizeValue("1m").singles();
+ final double fpp = 0.01;
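+ // compare Elasticsearch's BloomFilter ("g") against Lucene's FuzzySet ("l") at the same memory budget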
+ BloomFilter gFilter = BloomFilter.create(ELEMENTS, fpp);
+ System.out.println("G SIZE: " + new ByteSizeValue(gFilter.getSizeInBytes()));
+
+ FuzzySet lFilter = FuzzySet.createSetBasedOnMaxMemory((int) gFilter.getSizeInBytes());
+ //FuzzySet lFilter = FuzzySet.createSetBasedOnQuality(ELEMENTS, 0.97f);
+
+ for (int i = 0; i < ELEMENTS; i++) {
+ BytesRef bytesRef = new BytesRef(Strings.randomBase64UUID(random));
+ gFilter.put(bytesRef);
+ lFilter.addValue(bytesRef);
+ }
+
+ int lFalse = 0;
+ int gFalse = 0;
+ for (int i = 0; i < ELEMENTS; i++) {
+ BytesRef bytesRef = new BytesRef(Strings.randomBase64UUID(random));
+ if (gFilter.mightContain(bytesRef)) {
+ gFalse++;
+ }
+ if (lFilter.contains(bytesRef) == FuzzySet.ContainsResult.MAYBE) {
+ lFalse++;
+ }
+ }
+ System.out.println("False positives, g[" + gFalse + "], l[" + lFalse + "]");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/checksum/ChecksumBenchmark.java b/src/test/java/org/elasticsearch/benchmark/checksum/ChecksumBenchmark.java
new file mode 100644
index 0000000..660d042
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/checksum/ChecksumBenchmark.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.checksum;
+
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.security.MessageDigest;
+import java.util.zip.Adler32;
+import java.util.zip.CRC32;
+
+/**
+ *
+ */
+public class ChecksumBenchmark {
+
+ public static final int BATCH_SIZE = 16 * 1024;
+
+ public static void main(String[] args) throws Exception {
+ System.out.println("Warming up");
+ long warmSize = ByteSizeValue.parseBytesSizeValue("1g", null).bytes();
+ crc(warmSize);
+ adler(warmSize);
+ md5(warmSize);
+
+ long dataSize = ByteSizeValue.parseBytesSizeValue("10g", null).bytes();
+ System.out.println("Running size: " + dataSize);
+ crc(dataSize);
+ adler(dataSize);
+ md5(dataSize);
+ }
+
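+ // each checksum streams dataSize bytes of zeros through the digest in BATCH_SIZE chunks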
+ private static void crc(long dataSize) {
+ long start = System.currentTimeMillis();
+ CRC32 crc = new CRC32();
+ byte[] data = new byte[BATCH_SIZE];
+ long iter = dataSize / BATCH_SIZE;
+ for (long i = 0; i < iter; i++) {
+ crc.update(data);
+ }
+ crc.getValue();
+ System.out.println("CRC took " + new TimeValue(System.currentTimeMillis() - start));
+ }
+
+ private static void adler(long dataSize) {
+ long start = System.currentTimeMillis();
+ Adler32 crc = new Adler32();
+ byte[] data = new byte[BATCH_SIZE];
+ long iter = dataSize / BATCH_SIZE;
+ for (long i = 0; i < iter; i++) {
+ crc.update(data);
+ }
+ crc.getValue();
+ System.out.println("Adler took " + new TimeValue(System.currentTimeMillis() - start));
+ }
+
+ private static void md5(long dataSize) throws Exception {
+ long start = System.currentTimeMillis();
+ byte[] data = new byte[BATCH_SIZE];
+ long iter = dataSize / BATCH_SIZE;
+ MessageDigest digest = MessageDigest.getInstance("MD5");
+ for (long i = 0; i < iter; i++) {
+ digest.update(data);
+ }
+ digest.digest();
+ System.out.println("md5 took " + new TimeValue(System.currentTimeMillis() - start));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/cluster/ClusterAllocationRerouteBenchmark.java b/src/test/java/org/elasticsearch/benchmark/cluster/ClusterAllocationRerouteBenchmark.java
new file mode 100644
index 0000000..855744b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/cluster/ClusterAllocationRerouteBenchmark.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.cluster;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+
+import java.util.Random;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+
+public class ClusterAllocationRerouteBenchmark {
+
+ private static final ESLogger logger = Loggers.getLogger(ClusterAllocationRerouteBenchmark.class);
+
+ public static void main(String[] args) {
+ final int numberOfRuns = 1;
+ final int numIndices = 5 * 365; // five years
+ final int numShards = 6;
+ final int numReplicas = 2;
+ final int numberOfNodes = 30;
+ final int numberOfTags = 2;
+ AllocationService strategy = ElasticsearchAllocationTestCase.createAllocationService(ImmutableSettings.builder()
+ .put("cluster.routing.allocation.awareness.attributes", "tag")
+ .build(), new Random(1));
+
+ MetaData.Builder mb = MetaData.builder();
+ for (int i = 1; i <= numIndices; i++) {
+ mb.put(IndexMetaData.builder("test_" + i).numberOfShards(numShards).numberOfReplicas(numReplicas));
+ }
+ MetaData metaData = mb.build();
+ RoutingTable.Builder rb = RoutingTable.builder();
+ for (int i = 1; i <= numIndices; i++) {
+ rb.addAsNew(metaData.index("test_" + i));
+ }
+ RoutingTable routingTable = rb.build();
+ DiscoveryNodes.Builder nb = DiscoveryNodes.builder();
+ for (int i = 1; i <= numberOfNodes; i++) {
+ nb.put(ElasticsearchAllocationTestCase.newNode("node" + i, numberOfTags == 0 ? ImmutableMap.<String, String>of() : ImmutableMap.of("tag", "tag_" + (i % numberOfTags))));
+ }
+ ClusterState initialClusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).nodes(nb).build();
+
+ long start = System.currentTimeMillis();
+ for (int i = 0; i < numberOfRuns; i++) {
+ logger.info("[{}] starting... ", i);
+ long runStart = System.currentTimeMillis();
+ ClusterState clusterState = initialClusterState;
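+ // alternate applying started shards and rerouting until no shard remains unassigned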
+ while (clusterState.readOnlyRoutingNodes().hasUnassignedShards()) {
+ logger.info("[{}] remaining unassigned {}", i, clusterState.readOnlyRoutingNodes().unassigned().size());
+ RoutingAllocation.Result result = strategy.applyStartedShards(clusterState, clusterState.readOnlyRoutingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ result = strategy.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ }
+ logger.info("[{}] took {}", i, TimeValue.timeValueMillis(System.currentTimeMillis() - runStart));
+ }
+ long took = System.currentTimeMillis() - start;
+ logger.info("total took {}, AVG {}", TimeValue.timeValueMillis(took), TimeValue.timeValueMillis(took / numberOfRuns));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/benchmark/common/lucene/uidscan/LuceneUidScanBenchmark.java b/src/test/java/org/elasticsearch/benchmark/common/lucene/uidscan/LuceneUidScanBenchmark.java
new file mode 100644
index 0000000..5858908
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/common/lucene/uidscan/LuceneUidScanBenchmark.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.common.lucene.uidscan;
+
+import jsr166y.ThreadLocalRandom;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.FSDirectory;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.unit.SizeValue;
+
+import java.io.File;
+import java.util.concurrent.CountDownLatch;
+
+/**
+ *
+ */
+public class LuceneUidScanBenchmark {
+
+ public static void main(String[] args) throws Exception {
+
+ FSDirectory dir = FSDirectory.open(new File("work/test"));
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ final int NUMBER_OF_THREADS = 2;
+ final long INDEX_COUNT = SizeValue.parseSizeValue("1m").singles();
+ final long SCAN_COUNT = SizeValue.parseSizeValue("100k").singles();
+ final long startUid = 1000000;
+
+ long LIMIT = startUid + INDEX_COUNT;
+ StopWatch watch = new StopWatch().start();
+ System.out.println("Indexing " + INDEX_COUNT + " docs...");
+ for (long i = startUid; i < LIMIT; i++) {
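+ // index the uid as both the _uid term and the _version doc value so lookups can be verified by equality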
+ Document doc = new Document();
+ doc.add(new StringField("_uid", Long.toString(i), Store.NO));
+ doc.add(new NumericDocValuesField("_version", i));
+ writer.addDocument(doc);
+ }
+ System.out.println("Done indexing, took " + watch.stop().lastTaskTime());
+
+ final IndexReader reader = DirectoryReader.open(writer, true);
+
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ for (long i = 0; i < SCAN_COUNT; i++) {
+ long id = startUid + ThreadLocalRandom.current().nextInt((int) INDEX_COUNT);
+ final long version = Versions.loadVersion(reader, new Term("_uid", Long.toString(id)));
+ if (version != id) {
+ System.err.println("wrong id...");
+ break;
+ }
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ latch.countDown();
+ }
+ }
+ });
+ }
+
+ watch = new StopWatch().start();
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].start();
+ }
+ latch.await();
+ watch.stop();
+ System.out.println("Scanned in " + watch.totalTime() + " TP Seconds " + ((SCAN_COUNT * NUMBER_OF_THREADS) / watch.totalTime().secondsFrac()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/common/recycler/RecyclerBenchmark.java b/src/test/java/org/elasticsearch/benchmark/common/recycler/RecyclerBenchmark.java
new file mode 100644
index 0000000..e8cdc91
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/common/recycler/RecyclerBenchmark.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.common.recycler;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.recycler.Recycler;
+
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.common.recycler.Recyclers.*;
+
+/** Benchmark that tries to measure the overhead of object recycling depending on concurrent access. */
+public class RecyclerBenchmark {
+
+ private static final long NUM_RECYCLES = 5000000L;
+ private static final Random RANDOM = new Random(0);
+
+ private static long bench(final Recycler<?> recycler, long numRecycles, int numThreads) throws InterruptedException {
+ final AtomicLong recycles = new AtomicLong(numRecycles);
+ final CountDownLatch latch = new CountDownLatch(1);
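+ // a single latch gates all worker threads so the timed section starts them simultaneously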
+ final Thread[] threads = new Thread[numThreads];
+ for (int i = 0; i < numThreads; ++i){
+ // thread ids are assigned sequentially, so spin up a random number of throwaway threads
+ // to keep the id distribution from being perfectly regular for the concurrent recycler
+ for (int j = RANDOM.nextInt(5); j >= 0; --j) {
+ new Thread();
+ }
+
+ threads[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ return;
+ }
+ while (recycles.getAndDecrement() > 0) {
+ final Recycler.V<?> v = recycler.obtain();
+ v.release();
+ }
+ }
+ };
+ }
+ for (Thread thread : threads) {
+ thread.start();
+ }
+ final long start = System.nanoTime();
+ latch.countDown();
+ for (Thread thread : threads) {
+ thread.join();
+ }
+ return System.nanoTime() - start;
+ }
+
+ public static void main(String[] args) throws InterruptedException {
+ final int limit = 100;
+ final Recycler.C<Object> c = new Recycler.C<Object>() {
+
+ @Override
+ public Object newInstance(int sizing) {
+ return new Object();
+ }
+
+ @Override
+ public void clear(Object value) {}
+ };
+
+ final ImmutableMap<String, Recycler<Object>> recyclers = ImmutableMap.<String, Recycler<Object>>builder()
+ .put("none", none(c))
+ .put("concurrent-queue", concurrentDeque(c, limit))
+ .put("thread-local", threadLocal(dequeFactory(c, limit)))
+ .put("soft-thread-local", threadLocal(softFactory(dequeFactory(c, limit))))
+ .put("locked", locked(deque(c, limit)))
+ .put("concurrent", concurrent(dequeFactory(c, limit), Runtime.getRuntime().availableProcessors()))
+ .put("soft-concurrent", concurrent(softFactory(dequeFactory(c, limit)), Runtime.getRuntime().availableProcessors())).build();
+
+ // warmup
+ final long start = System.nanoTime();
+ while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)) {
+ for (Recycler<?> recycler : recyclers.values()) {
+ bench(recycler, NUM_RECYCLES, 2);
+ }
+ }
+
+ // run
+ for (int numThreads = 1; numThreads <= 4 * Runtime.getRuntime().availableProcessors(); numThreads *= 2) {
+ System.out.println("## " + numThreads + " threads\n");
+ System.gc();
+ Thread.sleep(1000);
+ for (Recycler<?> recycler : recyclers.values()) {
+ bench(recycler, NUM_RECYCLES, numThreads);
+ }
+ for (int i = 0; i < 5; ++i) {
+ for (Map.Entry<String, Recycler<Object>> entry : recyclers.entrySet()) {
+ System.out.println(entry.getKey() + "\t" + TimeUnit.NANOSECONDS.toMillis(bench(entry.getValue(), NUM_RECYCLES, numThreads)));
+ }
+ System.out.println();
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/common/util/BytesRefComparisonsBenchmark.java b/src/test/java/org/elasticsearch/benchmark/common/util/BytesRefComparisonsBenchmark.java
new file mode 100644
index 0000000..7ff45eb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/common/util/BytesRefComparisonsBenchmark.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.common.util;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.UnsafeUtils;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+public class BytesRefComparisonsBenchmark {
+
+ private static final Random R = new Random(0);
+ private static final int ITERS = 100;
+
+ // To avoid JVM optimizations
+ @SuppressWarnings("unused")
+ private static boolean DUMMY;
+
+ enum Comparator {
+ SAFE {
+ boolean compare(BytesRef b1, BytesRef b2) {
+ return b1.bytesEquals(b2);
+ }
+ },
+ UNSAFE {
+ @Override
+ boolean compare(BytesRef b1, BytesRef b2) {
+ return UnsafeUtils.equals(b1, b2);
+ }
+ };
+ abstract boolean compare(BytesRef b1, BytesRef b2);
+ }
+
+ private static BytesRef[] buildBytesRefs(int minLen, int maxLen, int count, int uniqueCount) {
+ final BytesRef[] uniqueRefs = new BytesRef[uniqueCount];
+ for (int i = 0; i < uniqueCount; ++i) {
+ final int len = RandomInts.randomIntBetween(R, minLen, maxLen);
+ final byte[] bytes = new byte[len];
+ for (int j = 0; j < bytes.length; ++j) {
+ bytes[j] = (byte) R.nextInt(2); // so that some instances have common prefixes
+ }
+ uniqueRefs[i] = new BytesRef(bytes);
+ }
+ final BytesRef[] result = new BytesRef[count];
+ for (int i = 0; i < count; ++i) {
+ result[i] = RandomPicks.randomFrom(R, uniqueRefs);
+ }
+ int totalLen = 0;
+ for (BytesRef b : result) {
+ totalLen += b.length;
+ }
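+ // repack every ref into one shared backing array so comparisons read from contiguous memory rather than per-ref allocations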
+ final byte[] data = new byte[totalLen];
+ int offset = 0;
+ for (int i = 0; i < count; ++i) {
+ final BytesRef b = result[i];
+ System.arraycopy(b.bytes, b.offset, data, offset, b.length);
+ result[i] = new BytesRef(data, offset, b.length);
+ offset += b.length;
+ }
+ if (offset != totalLen) {
+ throw new AssertionError();
+ }
+ return result;
+ }
+
+ private static long bench(Comparator comparator, BytesRef[] refs, int iters) {
+ boolean xor = false;
+ final long start = System.nanoTime();
+ for (int iter = 0; iter < iters; ++iter) {
+ for (int i = 0; i < refs.length; ++i) {
+ for (int j = i + 1; j < refs.length; ++j) {
+ xor ^= comparator.compare(refs[i], refs[j]);
+ }
+ }
+ }
+ DUMMY = xor;
+ return System.nanoTime() - start;
+ }
+
+ public static void main(String[] args) throws InterruptedException {
+ // warmup
+ BytesRef[] bytes = buildBytesRefs(2, 20, 1000, 100);
+ final long start = System.nanoTime();
+ while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)) {
+ for (Comparator comparator : Comparator.values()) {
+ bench(comparator, bytes, 1);
+ }
+ }
+
+ System.out.println("## Various lengths");
+ // make sure GC doesn't hurt results
+ System.gc();
+ Thread.sleep(2000);
+ for (Comparator comparator : Comparator.values()) {
+ bench(comparator, bytes, ITERS);
+ }
+ for (int i = 0; i < 3; ++i) {
+ for (Comparator comparator : Comparator.values()) {
+ System.out.println(comparator + " " + new TimeValue(bench(comparator, bytes, ITERS), TimeUnit.NANOSECONDS));
+ }
+ }
+
+ for (int len = 2; len <= 20; ++len) {
+ System.out.println("## Length = " + len);
+ bytes = buildBytesRefs(len, len, 1000, 100);
+ System.gc();
+ Thread.sleep(2000);
+ for (Comparator comparator : Comparator.values()) {
+ bench(comparator, bytes, ITERS);
+ }
+ for (int i = 0; i < 3; ++i) {
+ for (Comparator comparator : Comparator.values()) {
+ System.out.println(comparator + " " + new TimeValue(bench(comparator, bytes, ITERS), TimeUnit.NANOSECONDS));
+ }
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/counter/SimpleCounterBenchmark.java b/src/test/java/org/elasticsearch/benchmark/counter/SimpleCounterBenchmark.java
new file mode 100644
index 0000000..ea1e589
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/counter/SimpleCounterBenchmark.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.counter;
+
+import org.elasticsearch.common.StopWatch;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ *
+ */
+public class SimpleCounterBenchmark {
+
+    private static final long NUMBER_OF_ITERATIONS = 10000000;
+    private static final int NUMBER_OF_THREADS = 100;
+
+ public static void main(String[] args) throws Exception {
+ final AtomicLong counter = new AtomicLong();
+ StopWatch stopWatch = new StopWatch().start();
+ System.out.println("Running " + NUMBER_OF_ITERATIONS);
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ counter.incrementAndGet();
+ }
+ System.out.println("Took " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));
+
+ System.out.println("Running using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ counter.incrementAndGet();
+ }
+ latch.countDown();
+ }
+ });
+ }
+ stopWatch = new StopWatch().start();
+ for (Thread thread : threads) {
+ thread.start();
+ }
+ latch.await();
+ stopWatch.stop();
+ System.out.println("Took " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/fielddata/LongFieldDataBenchmark.java b/src/test/java/org/elasticsearch/benchmark/fielddata/LongFieldDataBenchmark.java
new file mode 100644
index 0000000..72a7f7c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/fielddata/LongFieldDataBenchmark.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.fielddata;
+
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.AtomicNumericFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.Mapper.BuilderContext;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.merge.Merges;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+
+import java.util.Random;
+
+public class LongFieldDataBenchmark {
+
+ private static final Random RANDOM = new Random();
+ private static final int SECONDS_PER_YEAR = 60 * 60 * 24 * 365;
+
+    public enum Data {
+        SINGLE_VALUED_DENSE_ENUM {
+ public int numValues() {
+ return 1;
+ }
+
+ @Override
+ public long nextValue() {
+ return RANDOM.nextInt(16);
+ }
+ },
+ SINGLE_VALUED_DENSE_DATE {
+ public int numValues() {
+ return 1;
+ }
+
+ @Override
+ public long nextValue() {
+                // somewhere between 2010 and 2012
+ return 1000L * (40L * SECONDS_PER_YEAR + RANDOM.nextInt(2 * SECONDS_PER_YEAR));
+ }
+ },
+ MULTI_VALUED_DATE {
+ public int numValues() {
+ return RANDOM.nextInt(3);
+ }
+
+ @Override
+ public long nextValue() {
+                // somewhere between 2010 and 2012
+ return 1000L * (40L * SECONDS_PER_YEAR + RANDOM.nextInt(2 * SECONDS_PER_YEAR));
+ }
+ },
+ MULTI_VALUED_ENUM {
+ public int numValues() {
+ return RANDOM.nextInt(3);
+ }
+
+ @Override
+ public long nextValue() {
+ return 3 + RANDOM.nextInt(8);
+ }
+ },
+ SINGLE_VALUED_SPARSE_RANDOM {
+ public int numValues() {
+ return RANDOM.nextFloat() < 0.1f ? 1 : 0;
+ }
+
+ @Override
+ public long nextValue() {
+ return RANDOM.nextLong();
+ }
+ },
+ MULTI_VALUED_SPARSE_RANDOM {
+ public int numValues() {
+ return RANDOM.nextFloat() < 0.1f ? 1 + RANDOM.nextInt(5) : 0;
+ }
+
+ @Override
+ public long nextValue() {
+ return RANDOM.nextLong();
+ }
+ },
+ MULTI_VALUED_DENSE_RANDOM {
+ public int numValues() {
+ return 1 + RANDOM.nextInt(3);
+ }
+
+ @Override
+ public long nextValue() {
+ return RANDOM.nextLong();
+ }
+ };
+
+ public abstract int numValues();
+
+ public abstract long nextValue();
+ }
+
+ public static void main(String[] args) throws Exception {
+ final IndexWriterConfig iwc = new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer());
+ final String fieldName = "f";
+ final int numDocs = 1000000;
+ System.out.println("Data\tLoading time\tImplementation\tActual size\tExpected size");
+ for (Data data : Data.values()) {
+ final RAMDirectory dir = new RAMDirectory();
+ final IndexWriter indexWriter = new IndexWriter(dir, iwc);
+ for (int i = 0; i < numDocs; ++i) {
+ final Document doc = new Document();
+ final int numFields = data.numValues();
+ for (int j = 0; j < numFields; ++j) {
+ doc.add(new LongField(fieldName, data.nextValue(), Store.NO));
+ }
+ indexWriter.addDocument(doc);
+ }
+ Merges.forceMerge(indexWriter, 1);
+ indexWriter.close();
+
+ final DirectoryReader dr = DirectoryReader.open(dir);
+ final IndexFieldDataService fds = new IndexFieldDataService(new Index("dummy"), new DummyCircuitBreakerService());
+ final LongFieldMapper mapper = new LongFieldMapper.Builder(fieldName).build(new BuilderContext(null, new ContentPath(1)));
+ final IndexNumericFieldData<AtomicNumericFieldData> fd = fds.getForField(mapper);
+ final long start = System.nanoTime();
+ final AtomicNumericFieldData afd = fd.loadDirect(SlowCompositeReaderWrapper.wrap(dr).getContext());
+ final long loadingTimeMs = (System.nanoTime() - start) / 1000 / 1000;
+ System.out.println(data + "\t" + loadingTimeMs + "\t" + afd.getClass().getSimpleName() + "\t" + RamUsageEstimator.humanSizeOf(afd.getLongValues()) + "\t" + RamUsageEstimator.humanReadableUnits(afd.getMemorySizeInBytes()));
+ dr.close();
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/fs/FsAppendBenchmark.java b/src/test/java/org/elasticsearch/benchmark/fs/FsAppendBenchmark.java
new file mode 100644
index 0000000..f8ec2cf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/fs/FsAppendBenchmark.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.fs;
+
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.unit.ByteSizeValue;
+
+import java.io.File;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.Random;
+
+/**
+ *
+ */
+public class FsAppendBenchmark {
+
+ public static void main(String[] args) throws Exception {
+ new File("work/test.log").delete();
+ RandomAccessFile raf = new RandomAccessFile("work/test.log", "rw");
+ raf.setLength(0);
+
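+        // CHANNEL toggles between positional FileChannel writes and plain
+        // RandomAccessFile appends; both variants force a sync at the end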
+ boolean CHANNEL = true;
+ int CHUNK = (int) ByteSizeValue.parseBytesSizeValue("1k").bytes();
+ long DATA = ByteSizeValue.parseBytesSizeValue("10gb").bytes();
+
+ byte[] data = new byte[CHUNK];
+ new Random().nextBytes(data);
+
+ StopWatch watch = new StopWatch().start("write");
+ if (CHANNEL) {
+ FileChannel channel = raf.getChannel();
+ long position = 0;
+ while (position < DATA) {
+ channel.write(ByteBuffer.wrap(data), position);
+ position += data.length;
+ }
+ watch.stop().start("flush");
+ channel.force(true);
+ } else {
+ long position = 0;
+ while (position < DATA) {
+ raf.write(data);
+ position += data.length;
+ }
+ watch.stop().start("flush");
+ raf.getFD().sync();
+ }
+ raf.close();
+ watch.stop();
+ System.out.println("Wrote [" + (new ByteSizeValue(DATA)) + "], chunk [" + (new ByteSizeValue(CHUNK)) + "], in " + watch);
+ }
+
+ private static final ByteBuffer fill = ByteBuffer.allocateDirect(1);
+
+// public static long padLogFile(long position, long currentSize, long preAllocSize) throws IOException {
+// if (position + 4096 >= currentSize) {
+// currentSize = currentSize + preAllocSize;
+// fill.position(0);
+// f.getChannel().write(fill, currentSize - fill.remaining());
+// }
+// return currentSize;
+// }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/get/SimpleGetActionBenchmark.java b/src/test/java/org/elasticsearch/benchmark/get/SimpleGetActionBenchmark.java
new file mode 100644
index 0000000..d78df7f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/get/SimpleGetActionBenchmark.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.get;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+// simple test for embedded / single remote lookup
+public class SimpleGetActionBenchmark {
+
+ public static void main(String[] args) {
+ long OPERATIONS = SizeValue.parseSizeValue("300k").singles();
+
+ Node node = NodeBuilder.nodeBuilder().node();
+
+ Client client;
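+        // flip the constant below to benchmark a dedicated client node (remote
+        // lookup) instead of the embedded node's own client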
+ if (false) {
+ client = NodeBuilder.nodeBuilder().client(true).node().client();
+ } else {
+ client = node.client();
+ }
+
+ client.prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet();
+
+ StopWatch stopWatch = new StopWatch().start();
+ for (long i = 0; i < OPERATIONS; i++) {
+ client.prepareGet("test", "type1", "1").execute().actionGet();
+ }
+ stopWatch.stop();
+
+ System.out.println("Ran in " + stopWatch.totalTime() + ", per second: " + (((double) OPERATIONS) / stopWatch.totalTime().secondsFrac()));
+
+ node.close();
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/benchmark/hppc/StringMapAdjustOrPutBenchmark.java b/src/test/java/org/elasticsearch/benchmark/hppc/StringMapAdjustOrPutBenchmark.java
new file mode 100644
index 0000000..ed90a5a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/hppc/StringMapAdjustOrPutBenchmark.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.hppc;
+
+import com.carrotsearch.hppc.IntIntOpenHashMap;
+import com.carrotsearch.hppc.IntObjectOpenHashMap;
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.unit.SizeValue;
+
+import java.util.HashMap;
+import java.util.IdentityHashMap;
+
+public class StringMapAdjustOrPutBenchmark {
+
+ public static void main(String[] args) {
+
+ int NUMBER_OF_KEYS = (int) SizeValue.parseSizeValue("20").singles();
+ int STRING_SIZE = 5;
+ long PUT_OPERATIONS = SizeValue.parseSizeValue("5m").singles();
+ long ITERATIONS = 10;
+ boolean REUSE = true;
+
+
+ String[] values = new String[NUMBER_OF_KEYS];
+ for (int i = 0; i < values.length; i++) {
+ values[i] = RandomStrings.randomAsciiOfLength(ThreadLocalRandom.current(), STRING_SIZE);
+ }
+
+ StopWatch stopWatch;
+
+ stopWatch = new StopWatch().start();
+ ObjectIntOpenHashMap<String> map = new ObjectIntOpenHashMap<String>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ map.clear();
+ } else {
+ map = new ObjectIntOpenHashMap<String>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ map.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
+ }
+ }
+ map.clear();
+ map = null;
+
+ stopWatch.stop();
+ System.out.println("TObjectIntHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+ stopWatch = new StopWatch().start();
+// TObjectIntCustomHashMap<String> iMap = new TObjectIntCustomHashMap<String>(new StringIdentityHashingStrategy());
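+        // NOTE: since the migration from Trove to HPPC, this run and the
+        // "PureIdentity" run below exercise the same ObjectIntOpenHashMap as the
+        // first run; the original custom-hashing variant is kept above for reference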
+ ObjectIntOpenHashMap<String> iMap = new ObjectIntOpenHashMap<String>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ iMap.clear();
+ } else {
+ iMap = new ObjectIntOpenHashMap<String>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ iMap.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
+ }
+ }
+ stopWatch.stop();
+ System.out.println("TObjectIntCustomHashMap(StringIdentity): " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+ iMap.clear();
+ iMap = null;
+
+ stopWatch = new StopWatch().start();
+ iMap = new ObjectIntOpenHashMap<String>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ iMap.clear();
+ } else {
+ iMap = new ObjectIntOpenHashMap<String>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ iMap.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
+ }
+ }
+ stopWatch.stop();
+ System.out.println("TObjectIntCustomHashMap(PureIdentity): " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+ iMap.clear();
+ iMap = null;
+
+        // now test with an object-to-object map (ObjectObjectOpenHashMap)
+ stopWatch = new StopWatch().start();
+ ObjectObjectOpenHashMap<String, StringEntry> tMap = new ObjectObjectOpenHashMap<String, StringEntry>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ tMap.clear();
+ } else {
+ tMap = new ObjectObjectOpenHashMap<String, StringEntry>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ String key = values[(int) (i % NUMBER_OF_KEYS)];
+ StringEntry stringEntry = tMap.get(key);
+ if (stringEntry == null) {
+ stringEntry = new StringEntry(key, 1);
+ tMap.put(key, stringEntry);
+ } else {
+ stringEntry.counter++;
+ }
+ }
+ }
+
+ tMap.clear();
+ tMap = null;
+
+ stopWatch.stop();
+ System.out.println("THashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+ stopWatch = new StopWatch().start();
+ HashMap<String, StringEntry> hMap = new HashMap<String, StringEntry>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ hMap.clear();
+ } else {
+ hMap = new HashMap<String, StringEntry>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ String key = values[(int) (i % NUMBER_OF_KEYS)];
+ StringEntry stringEntry = hMap.get(key);
+ if (stringEntry == null) {
+ stringEntry = new StringEntry(key, 1);
+ hMap.put(key, stringEntry);
+ } else {
+ stringEntry.counter++;
+ }
+ }
+ }
+
+ hMap.clear();
+ hMap = null;
+
+ stopWatch.stop();
+ System.out.println("HashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+
+ stopWatch = new StopWatch().start();
+ IdentityHashMap<String, StringEntry> ihMap = new IdentityHashMap<String, StringEntry>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ ihMap.clear();
+ } else {
+                ihMap = new IdentityHashMap<String, StringEntry>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ String key = values[(int) (i % NUMBER_OF_KEYS)];
+ StringEntry stringEntry = ihMap.get(key);
+ if (stringEntry == null) {
+ stringEntry = new StringEntry(key, 1);
+ ihMap.put(key, stringEntry);
+ } else {
+ stringEntry.counter++;
+ }
+ }
+ }
+ stopWatch.stop();
+ System.out.println("IdentityHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+ ihMap.clear();
+ ihMap = null;
+
+ int[] iValues = new int[NUMBER_OF_KEYS];
+        for (int i = 0; i < iValues.length; i++) {
+ iValues[i] = ThreadLocalRandom.current().nextInt();
+ }
+
+ stopWatch = new StopWatch().start();
+ IntIntOpenHashMap intMap = new IntIntOpenHashMap();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ intMap.clear();
+ } else {
+ intMap = new IntIntOpenHashMap();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ int key = iValues[(int) (i % NUMBER_OF_KEYS)];
+ intMap.addTo(key, 1);
+ }
+ }
+ stopWatch.stop();
+ System.out.println("TIntIntHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+ intMap.clear();
+ intMap = null;
+
+        // now test with IntObjectOpenHashMap
+ stopWatch = new StopWatch().start();
+ IntObjectOpenHashMap<IntEntry> tIntMap = new IntObjectOpenHashMap<IntEntry>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ tIntMap.clear();
+ } else {
+ tIntMap = new IntObjectOpenHashMap<IntEntry>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ int key = iValues[(int) (i % NUMBER_OF_KEYS)];
+ IntEntry intEntry = tIntMap.get(key);
+ if (intEntry == null) {
+ intEntry = new IntEntry(key, 1);
+ tIntMap.put(key, intEntry);
+ } else {
+ intEntry.counter++;
+ }
+ }
+ }
+
+ tIntMap.clear();
+ tIntMap = null;
+
+ stopWatch.stop();
+ System.out.println("TIntObjectHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+ }
+
+
+ static class StringEntry {
+ String key;
+ int counter;
+
+ StringEntry(String key, int counter) {
+ this.key = key;
+ this.counter = counter;
+ }
+ }
+
+ static class IntEntry {
+ int key;
+ int counter;
+
+ IntEntry(int key, int counter) {
+ this.key = key;
+ this.counter = counter;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/benchmark/percolator/PercolatorStressBenchmark.java b/src/test/java/org/elasticsearch/benchmark/percolator/PercolatorStressBenchmark.java
new file mode 100644
index 0000000..7d6096c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/percolator/PercolatorStressBenchmark.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.percolator;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.percolator.PercolatorService;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class PercolatorStressBenchmark {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("cluster.routing.schedule", 200, TimeUnit.MILLISECONDS)
+ .put("gateway.type", "none")
+ .put(SETTING_NUMBER_OF_SHARDS, 4)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ }
+
+ Node clientNode = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+ Client client = clientNode.client();
+
+ client.admin().indices().create(createIndexRequest("test")).actionGet();
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth("test")
+ .setWaitForGreenStatus()
+ .execute().actionGet();
+ if (healthResponse.isTimedOut()) {
+ System.err.println("Quiting, because cluster health requested timed out...");
+ return;
+ } else if (healthResponse.getStatus() != ClusterHealthStatus.GREEN) {
+ System.err.println("Quiting, because cluster state isn't green...");
+ return;
+ }
+
+ int COUNT = 200000;
+ int QUERIES = 100;
+ int TERM_QUERIES = QUERIES / 2;
+ int RANGE_QUERIES = QUERIES - TERM_QUERIES;
+
+ client.prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("numeric1", 1).endObject()).execute().actionGet();
+
+ // register queries
+ int i = 0;
+ for (; i < TERM_QUERIES; i++) {
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject()
+ .field("query", termQuery("name", "value"))
+ .endObject())
+ .execute().actionGet();
+ }
+
+ int[] numbers = new int[RANGE_QUERIES];
+ for (; i < QUERIES; i++) {
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject()
+ .field("query", rangeQuery("numeric1").from(i).to(i))
+ .endObject())
+ .execute().actionGet();
+ numbers[i - TERM_QUERIES] = i;
+ }
+
+ StopWatch stopWatch = new StopWatch().start();
+ System.out.println("Percolating [" + COUNT + "] ...");
+ for (i = 1; i <= COUNT; i++) {
+ XContentBuilder source;
+ int expectedMatches;
+ if (i % 2 == 0) {
+ source = source(Integer.toString(i), "value");
+ expectedMatches = TERM_QUERIES;
+ } else {
+ int number = numbers[i % RANGE_QUERIES];
+ source = source(Integer.toString(i), number);
+ expectedMatches = 1;
+ }
+ PercolateResponse percolate = client.preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(source)
+ .execute().actionGet();
+ if (percolate.getMatches().length != expectedMatches) {
+ System.err.println("No matching number of queries");
+ }
+
+ if ((i % 10000) == 0) {
+ System.out.println("Percolated " + i + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("Percolation took " + stopWatch.totalTime() + ", TPS " + (((double) COUNT) / stopWatch.totalTime().secondsFrac()));
+
+ clientNode.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private static XContentBuilder source(String id, String nameValue) throws IOException {
+ return jsonBuilder().startObject().startObject("doc")
+ .field("id", id)
+ .field("name", nameValue)
+ .endObject().endObject();
+ }
+
+ private static XContentBuilder source(String id, int number) throws IOException {
+ return jsonBuilder().startObject().startObject("doc")
+ .field("id", id)
+ .field("numeric1", number)
+ .field("numeric2", number)
+ .field("numeric3", number)
+ .field("numeric4", number)
+ .field("numeric5", number)
+ .field("numeric6", number)
+ .field("numeric7", number)
+ .field("numeric8", number)
+ .field("numeric9", number)
+ .field("numeric10", number)
+ .endObject().endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/BasicScriptBenchmark.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/BasicScriptBenchmark.java
new file mode 100644
index 0000000..61b2074
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/BasicScriptBenchmark.java
@@ -0,0 +1,337 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score;
+
+import com.google.common.base.Charsets;
+import com.google.common.io.Files;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionBuilder;
+import org.joda.time.DateTime;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.security.SecureRandom;
+import java.util.*;
+import java.util.Map.Entry;
+
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+
+public class BasicScriptBenchmark {
+
+ public static class RequestInfo {
+ public RequestInfo(SearchRequest source, int i) {
+ request = source;
+ numTerms = i;
+ }
+
+ SearchRequest request;
+ int numTerms;
+ }
+
+ public static class Results {
+ public static final String TIME_PER_DOCIN_MILLIS = "timePerDocinMillis";
+ public static final String NUM_TERMS = "numTerms";
+ public static final String NUM_DOCS = "numDocs";
+ public static final String TIME_PER_QUERY_IN_SEC = "timePerQueryInSec";
+ public static final String TOTAL_TIME_IN_SEC = "totalTimeInSec";
+ Double[] resultSeconds;
+ Double[] resultMSPerQuery;
+ Long[] numDocs;
+ Integer[] numTerms;
+ Double[] timePerDoc;
+ String label;
+ String description;
+ public String lineStyle;
+ public String color;
+
+ void init(int numVariations, String label, String description, String color, String lineStyle) {
+ resultSeconds = new Double[numVariations];
+ resultMSPerQuery = new Double[numVariations];
+ numDocs = new Long[numVariations];
+ numTerms = new Integer[numVariations];
+ timePerDoc = new Double[numVariations];
+ this.label = label;
+ this.description = description;
+ this.color = color;
+ this.lineStyle = lineStyle;
+ }
+
+ void set(SearchResponse searchResponse, StopWatch stopWatch, String message, int maxIter, int which, int numTerms) {
+            resultSeconds[which] = stopWatch.lastTaskTime().getMillis() / 1000.0;
+            // note: despite the field name, this stores seconds per query (cf. TIME_PER_QUERY_IN_SEC)
+            resultMSPerQuery[which] = stopWatch.lastTaskTime().secondsFrac() / maxIter;
+ numDocs[which] = searchResponse.getHits().totalHits();
+ this.numTerms[which] = numTerms;
+ timePerDoc[which] = resultMSPerQuery[which] / numDocs[which];
+ }
+
+ public void printResults(BufferedWriter writer) throws IOException {
+            // when writing an Octave file, terminate each assignment with ';'
+            String lineEnd = (writer == null) ? "" : ";";
+            String results = description + "\n" + Results.TOTAL_TIME_IN_SEC + " = " + getResultArray(resultSeconds) + lineEnd + "\n"
+                    + Results.TIME_PER_QUERY_IN_SEC + " = " + getResultArray(resultMSPerQuery) + lineEnd + "\n" + Results.NUM_DOCS + " = "
+                    + getResultArray(numDocs) + lineEnd + "\n" + Results.NUM_TERMS + " = " + getResultArray(numTerms) + lineEnd + "\n"
+                    + Results.TIME_PER_DOCIN_MILLIS + " = " + getResultArray(timePerDoc) + lineEnd + "\n";
+ if (writer != null) {
+ writer.write(results);
+ } else {
+ System.out.println(results);
+ }
+
+ }
+
+ private String getResultArray(Object[] resultArray) {
+ String result = "[";
+ for (int i = 0; i < resultArray.length; i++) {
+ result += resultArray[i].toString();
+ if (i != resultArray.length - 1) {
+ result += ",";
+ }
+ }
+ result += "]";
+ return result;
+ }
+ }
+
+ public BasicScriptBenchmark() {
+ }
+
+ static List<String> termsList = new ArrayList<String>();
+
+ static void init(int numTerms) {
+ SecureRandom random = new SecureRandom();
+ random.setSeed(1);
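+        // fixed seed: every run draws an identical dictionary of random terms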
+ termsList.clear();
+ for (int i = 0; i < numTerms; i++) {
+ String term = new BigInteger(512, random).toString(32);
+ termsList.add(term);
+ }
+
+ }
+
+ static String[] getTerms(int numTerms) {
+ String[] terms = new String[numTerms];
+ for (int i = 0; i < numTerms; i++) {
+ terms[i] = termsList.get(i);
+ }
+ return terms;
+ }
+
+ public static void writeHelperFunction() throws IOException {
+ File file = new File("addToPlot.m");
+ BufferedWriter out = Files.newWriter(file, Charsets.UTF_8);
+
+ out.write("function handle = addToPlot(numTerms, perDoc, color, linestyle, linewidth)\n" + "handle = line(numTerms, perDoc);\n"
+ + "set(handle, 'color', color);\n" + "set(handle, 'linestyle',linestyle);\n" + "set(handle, 'LineWidth',linewidth);\n"
+ + "end\n");
+ out.close();
+ }
+
+ public static void printOctaveScript(List<Results> allResults, String[] args) throws IOException {
+ if (args.length == 0) {
+ return;
+ }
+ BufferedWriter out = null;
+ try {
+ File file = new File(args[0]);
+ out = Files.newWriter(file, Charsets.UTF_8);
+
+ out.write("#! /usr/local/bin/octave -qf");
+ out.write("\n\n\n\n");
+ out.write("######################################\n");
+ out.write("# Octave script for plotting results\n");
+ String filename = "scriptScoreBenchmark" + new DateTime().toString();
+ out.write("#Call '" + args[0] + "' from the command line. The plot is then in " + filename + "\n\n");
+
+ out.write("handleArray = [];\n tagArray = [];\n plot([]);\n hold on;\n");
+ for (Results result : allResults) {
+ out.write("\n");
+ out.write("# " + result.description);
+ result.printResults(out);
+ out.write("handleArray = [handleArray, addToPlot(" + Results.NUM_TERMS + ", " + Results.TIME_PER_DOCIN_MILLIS + ", '"
+ + result.color + "','" + result.lineStyle + "',5)];\n");
+ out.write("tagArray = [tagArray; '" + result.label + "'];\n");
+ out.write("\n");
+ }
+
+ out.write("xlabel(\'number of query terms');");
+ out.write("ylabel(\'query time per document');");
+
+ out.write("legend(handleArray,tagArray);\n");
+
+ out.write("saveas(gcf,'" + filename + ".png','png')\n");
+ out.write("hold off;\n\n");
+ } catch (IOException e) {
+ System.err.println("Error: " + e.getMessage());
+ } finally {
+ if (out != null) {
+ out.close();
+ }
+ }
+ writeHelperFunction();
+ }
+
+ static void printResult(SearchResponse searchResponse, StopWatch stopWatch, String queryInfo) {
+ System.out.println("--> Searching with " + queryInfo + " took " + stopWatch.lastTaskTime() + ", per query "
+ + (stopWatch.lastTaskTime().secondsFrac() / 100) + " for " + searchResponse.getHits().totalHits() + " docs");
+ }
+
+ static void indexData(long numDocs, Client client, boolean randomizeTerms) throws IOException {
+ try {
+ client.admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Throwable t) {
+            // the index may not exist yet, in which case the delete fails and
+            // there is nothing to do. TODO: make this safer in general
+ }
+
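+        // the payload_float analyzer splits on whitespace and stores the
+        // "|<float>" suffix of each token as a float payload (settings below)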
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("text").field("type", "string").field("index_options", "offsets").field("analyzer", "payload_float")
+ .endObject().endObject().endObject().endObject();
+ client.admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping("type1", mapping)
+ .setSettings(
+ ImmutableSettings.settingsBuilder().put("index.analysis.analyzer.payload_float.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_float.filter", "delimited_float")
+ .put("index.analysis.filter.delimited_float.delimiter", "|")
+ .put("index.analysis.filter.delimited_float.encoding", "float")
+ .put("index.analysis.filter.delimited_float.type", "delimited_payload_filter")
+ .put("index.number_of_replicas", 0).put("index.number_of_shards", 1)).execute().actionGet();
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ BulkRequestBuilder bulkRequest = client.prepareBulk();
+ Random random = new Random(1);
+ for (int i = 0; i < numDocs; i++) {
+
+ bulkRequest.add(client.prepareIndex().setType("type1").setIndex("test")
+ .setSource(jsonBuilder().startObject().field("text", randomText(random, randomizeTerms)).endObject()));
+ if (i % 1000 == 0) {
+ bulkRequest.execute().actionGet();
+ bulkRequest = client.prepareBulk();
+ }
+ }
+ bulkRequest.execute().actionGet();
+ client.admin().indices().prepareRefresh("test").execute().actionGet();
+ client.admin().indices().prepareFlush("test").setFull(true).execute().actionGet();
+ System.out.println("Done indexing " + numDocs + " documents");
+
+ }
+
+ private static String randomText(Random random, boolean randomizeTerms) {
+ String text = "";
+ for (int i = 0; i < termsList.size(); i++) {
+ if (random.nextInt(5) == 3 || !randomizeTerms) {
+ text = text + " " + termsList.get(i) + "|1";
+ }
+ }
+ return text;
+ }
+
+ static void printTimings(SearchResponse searchResponse, StopWatch stopWatch, String message, int maxIter) {
+ System.out.println(message);
+ System.out.println(stopWatch.lastTaskTime() + ", " + (stopWatch.lastTaskTime().secondsFrac() / maxIter) + ", "
+ + searchResponse.getHits().totalHits() + ", "
+                + (stopWatch.lastTaskTime().secondsFrac() / (maxIter * searchResponse.getHits().totalHits()))); // seconds per doc
+ }
+
+ static List<Entry<String, RequestInfo>> initTermQueries(int minTerms, int maxTerms) {
+ List<Entry<String, RequestInfo>> termSearchRequests = new ArrayList<Entry<String, RequestInfo>>();
+ for (int nTerms = minTerms; nTerms < maxTerms; nTerms++) {
+            String[] terms = getTerms(nTerms + 1);
+ SearchRequest request = searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).size(0).query(QueryBuilders.termsQuery("text", terms)));
+ String infoString = "Results for term query with " + (nTerms + 1) + " terms:";
+ termSearchRequests.add(new AbstractMap.SimpleEntry<String, RequestInfo>(infoString, new RequestInfo(request, nTerms + 1)));
+ }
+ return termSearchRequests;
+ }
+
+ static List<Entry<String, RequestInfo>> initNativeSearchRequests(int minTerms, int maxTerms, String script, boolean langNative) {
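+        // one function_score request per term count: the terms filter selects the
+        // docs, the script alone supplies the score (boost mode REPLACE)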
+ List<Entry<String, RequestInfo>> nativeSearchRequests = new ArrayList<Entry<String, RequestInfo>>();
+ for (int nTerms = minTerms; nTerms < maxTerms; nTerms++) {
+ Map<String, Object> params = new HashMap<String, Object>();
+ String[] terms = getTerms(nTerms + 1);
+ params.put("text", terms);
+ String infoString = "Results for native script with " + (nTerms + 1) + " terms:";
+            ScriptScoreFunctionBuilder scriptFunction = langNative ? scriptFunction(script, "native", params)
+                    : scriptFunction(script, params);
+ SearchRequest request = searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .explain(false)
+ .size(0)
+ .query(functionScoreQuery(FilterBuilders.termsFilter("text", terms), scriptFunction).boostMode(
+ CombineFunction.REPLACE)));
+ nativeSearchRequests.add(new AbstractMap.SimpleEntry<String, RequestInfo>(infoString, new RequestInfo(request, nTerms + 1)));
+ }
+ return nativeSearchRequests;
+ }
+
+ static List<Entry<String, RequestInfo>> initScriptMatchAllSearchRequests(String script, boolean langNative) {
+ List<Entry<String, RequestInfo>> nativeSearchRequests = new ArrayList<Entry<String, RequestInfo>>();
+ String infoString = "Results for constant score script:";
+        ScriptScoreFunctionBuilder scriptFunction = langNative ? scriptFunction(script, "native") : scriptFunction(script);
+ SearchRequest request = searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).size(0)
+ .query(functionScoreQuery(FilterBuilders.matchAllFilter(), scriptFunction).boostMode(CombineFunction.REPLACE)));
+ nativeSearchRequests.add(new AbstractMap.SimpleEntry<String, RequestInfo>(infoString, new RequestInfo(request, 0)));
+
+ return nativeSearchRequests;
+ }
+
+ static void runBenchmark(Client client, int maxIter, Results results, List<Entry<String, RequestInfo>> nativeSearchRequests,
+ int minTerms, int warmerIter) throws IOException {
+ int counter = 0;
+ for (Entry<String, RequestInfo> entry : nativeSearchRequests) {
+ SearchResponse searchResponse = null;
+ // warm up
+ for (int i = 0; i < warmerIter; i++) {
+ searchResponse = client.search(entry.getValue().request).actionGet();
+ }
+ System.gc();
+ // run benchmark
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+ for (int i = 0; i < maxIter; i++) {
+ searchResponse = client.search(entry.getValue().request).actionGet();
+ }
+ stopWatch.stop();
+ results.set(searchResponse, stopWatch, entry.getKey(), maxIter, counter, entry.getValue().numTerms);
+ counter++;
+ }
+ results.printResults(null);
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java
new file mode 100644
index 0000000..774e295
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.scripts.score;
+
+import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin;
+import org.elasticsearch.benchmark.scripts.score.script.NativeConstantForLoopScoreScript;
+import org.elasticsearch.benchmark.scripts.score.script.NativeConstantScoreScript;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class ScriptsConstantScoreBenchmark extends BasicScriptBenchmark {
+
+ public static void main(String[] args) throws Exception {
+
+ int minTerms = 49;
+ int maxTerms = 50;
+ int maxIter = 1000;
+ int warmerIter = 1000;
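+        // compares four constant-score variants: native Java vs. MVEL, each as a
+        // plain constant and as log(2) summed ten times in a loop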
+
+ init(maxTerms);
+ List<Results> allResults = new ArrayList<BasicScriptBenchmark.Results>();
+ Settings settings = settingsBuilder().put("plugin.types", NativeScriptExamplesPlugin.class.getName()).build();
+
+ String clusterName = ScriptsConstantScoreBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "node1")).node();
+ Client client = node1.client();
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ indexData(10000, client, true);
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ Results results = new Results();
+
+ results.init(maxTerms - minTerms, "native const script score (log(2) 10X)",
+ "Results for native const script score with score = log(2) 10X:", "black", "-.");
+ // init script searches
+ List<Entry<String, RequestInfo>> searchRequests = initScriptMatchAllSearchRequests(
+ NativeConstantForLoopScoreScript.NATIVE_CONSTANT_FOR_LOOP_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ // init native script searches
+ results = new Results();
+ results.init(maxTerms - minTerms, "mvel const (log(2) 10X)", "Results for mvel const score = log(2) 10X:", "red", "-.");
+ searchRequests = initScriptMatchAllSearchRequests("score = 0; for (int i=0; i<10;i++) {score = score + log(2);} return score",
+ false);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ results = new Results();
+ results.init(maxTerms - minTerms, "native const script score (2)", "Results for native const script score with score = 2:",
+ "black", ":");
+ // init native script searches
+ searchRequests = initScriptMatchAllSearchRequests(NativeConstantScoreScript.NATIVE_CONSTANT_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ results = new Results();
+ results.init(maxTerms - minTerms, "mvel const (2)", "Results for mvel const score = 2:", "red", "--");
+ // init native script searches
+ searchRequests = initScriptMatchAllSearchRequests("2", false);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ printOctaveScript(allResults, args);
+
+ client.close();
+ node1.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java
new file mode 100644
index 0000000..a78830e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.scripts.score;
+
+import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin;
+import org.elasticsearch.benchmark.scripts.score.script.NativeNaiveTFIDFScoreScript;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class ScriptsScoreBenchmark extends BasicScriptBenchmark {
+
+ public static void main(String[] args) throws Exception {
+
+ int minTerms = 1;
+ int maxTerms = 50;
+ int maxIter = 100;
+ int warmerIter = 10;
+
+ boolean runMVEL = false;
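+        // flip to true to also benchmark the equivalent MVEL tf-idf script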
+ init(maxTerms);
+ List<Results> allResults = new ArrayList<BasicScriptBenchmark.Results>();
+ Settings settings = settingsBuilder().put("plugin.types", NativeScriptExamplesPlugin.class.getName()).build();
+
+ String clusterName = ScriptsScoreBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "node1")).node();
+ Client client = node1.client();
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ indexData(10000, client, false);
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ Results results = new Results();
+ results.init(maxTerms - minTerms, "native tfidf script score dense posting list",
+ "Results for native script score with dense posting list:", "black", "--");
+ // init native script searches
+ List<Entry<String, RequestInfo>> searchRequests = initNativeSearchRequests(minTerms, maxTerms,
+ NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ results = new Results();
+
+ results.init(maxTerms - minTerms, "term query dense posting list", "Results for term query with dense posting lists:", "green",
+ "--");
+ // init term queries
+ searchRequests = initTermQueries(minTerms, maxTerms);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ if (runMVEL) {
+
+ results = new Results();
+ results.init(maxTerms - minTerms, "mvel tfidf dense posting list", "Results for mvel score with dense posting list:", "red",
+ "--");
+ // init native script searches
+ searchRequests = initNativeSearchRequests(
+ minTerms,
+ maxTerms,
+ "score = 0.0; fi= _terminfo[\"text\"]; for(i=0; i<text.size(); i++){terminfo = fi[text.get(i)]; score = score + terminfo.tf()*fi.getDocCount()/terminfo.df();} return score;",
+ false);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+ }
+
+ indexData(10000, client, true);
+ results = new Results();
+ results.init(maxTerms - minTerms, "native tfidf script score sparse posting list",
+ "Results for native script scorewith sparse posting list:", "black", "-.");
+ // init native script searches
+ searchRequests = initNativeSearchRequests(minTerms, maxTerms, NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ results = new Results();
+
+ results.init(maxTerms - minTerms, "term query sparse posting list", "Results for term query with sparse posting lists:", "green",
+ "-.");
+ // init term queries
+ searchRequests = initTermQueries(minTerms, maxTerms);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ if (runMVEL) {
+
+ results = new Results();
+ results.init(maxTerms - minTerms, "mvel tfidf sparse posting list", "Results for mvel score with sparse posting list:", "red",
+ "-.");
+ // init native script searches
+ searchRequests = initNativeSearchRequests(
+ minTerms,
+ maxTerms,
+ "score = 0.0; fi= _terminfo[\"text\"]; for(i=0; i<text.size(); i++){terminfo = fi[text.get(i)]; score = score + terminfo.tf()*fi.getDocCount()/terminfo.df();} return score;",
+ false);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+ }
+ printOctaveScript(allResults, args);
+
+ client.close();
+ node1.close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java
new file mode 100644
index 0000000..592ffe1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.scripts.score;
+
+import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin;
+import org.elasticsearch.benchmark.scripts.score.script.NativePayloadSumNoRecordScoreScript;
+import org.elasticsearch.benchmark.scripts.score.script.NativePayloadSumScoreScript;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class ScriptsScorePayloadSumBenchmark extends BasicScriptBenchmark {
+
+ public static void main(String[] args) throws Exception {
+
+ int minTerms = 1;
+ int maxTerms = 50;
+ int maxIter = 100;
+ int warmerIter = 10;
+
+ init(maxTerms);
+ List<Results> allResults = new ArrayList<BasicScriptBenchmark.Results>();
+ Settings settings = settingsBuilder().put("plugin.types", NativeScriptExamplesPlugin.class.getName()).build();
+
+        String clusterName = ScriptsScorePayloadSumBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "node1")).node();
+ Client client = node1.client();
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ indexData(10000, client, false);
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ Results results = new Results();
+ // init script searches
+ results.init(maxTerms - minTerms, "native payload sum script score", "Results for native script score:", "green", ":");
+ List<Entry<String, RequestInfo>> searchRequests = initNativeSearchRequests(minTerms, maxTerms,
+ NativePayloadSumScoreScript.NATIVE_PAYLOAD_SUM_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ results = new Results();
+ // init script searches
+ results.init(maxTerms - minTerms, "native payload sum script score no record", "Results for native script score:", "black", ":");
+ searchRequests = initNativeSearchRequests(minTerms, maxTerms,
+ NativePayloadSumNoRecordScoreScript.NATIVE_PAYLOAD_SUM_NO_RECORD_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ printOctaveScript(allResults, args);
+
+ client.close();
+ node1.close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/plugin/NativeScriptExamplesPlugin.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/plugin/NativeScriptExamplesPlugin.java
new file mode 100644
index 0000000..0d90d9f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/plugin/NativeScriptExamplesPlugin.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.scripts.score.plugin;
+
+import org.elasticsearch.benchmark.scripts.score.script.*;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.script.ScriptModule;
+
+public class NativeScriptExamplesPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "native-script-example";
+ }
+
+ @Override
+ public String description() {
+ return "Native script examples";
+ }
+
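+ // register each native benchmark script under the name the benchmarks request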
+ public void onModule(ScriptModule module) {
+ module.registerScript(NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, NativeNaiveTFIDFScoreScript.Factory.class);
+ module.registerScript(NativeConstantForLoopScoreScript.NATIVE_CONSTANT_FOR_LOOP_SCRIPT_SCORE, NativeConstantForLoopScoreScript.Factory.class);
+ module.registerScript(NativeConstantScoreScript.NATIVE_CONSTANT_SCRIPT_SCORE, NativeConstantScoreScript.Factory.class);
+ module.registerScript(NativePayloadSumScoreScript.NATIVE_PAYLOAD_SUM_SCRIPT_SCORE, NativePayloadSumScoreScript.Factory.class);
+ module.registerScript(NativePayloadSumNoRecordScoreScript.NATIVE_PAYLOAD_SUM_NO_RECORD_SCRIPT_SCORE, NativePayloadSumNoRecordScoreScript.Factory.class);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantForLoopScoreScript.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantForLoopScoreScript.java
new file mode 100644
index 0000000..c61a40d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantForLoopScoreScript.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeConstantForLoopScoreScript extends AbstractSearchScript {
+
+ public static final String NATIVE_CONSTANT_FOR_LOOP_SCRIPT_SCORE = "native_constant_for_loop_script_score";
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeConstantForLoopScoreScript(params);
+ }
+ }
+
+ private NativeConstantForLoopScoreScript(Map<String, Object> params) {
+ // params are ignored; the script performs a fixed amount of work per doc
+ }
+
+ @Override
+ public Object run() {
+ float score = 0;
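+ // fixed amount of cheap math per document, independent of the query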
+ for (int i = 0; i < 10; i++) {
+ score += Math.log(2);
+ }
+ return score;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantScoreScript.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantScoreScript.java
new file mode 100644
index 0000000..6d07242
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantScoreScript.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeConstantScoreScript extends AbstractSearchScript {
+
+ public static final String NATIVE_CONSTANT_SCRIPT_SCORE = "native_constant_script_score";
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeConstantScoreScript();
+ }
+ }
+
+ private NativeConstantScoreScript() {
+ }
+
+ @Override
+ public Object run() {
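+ // constant score: measures bare native-script invocation overhead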
+ return 2;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeNaiveTFIDFScoreScript.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeNaiveTFIDFScoreScript.java
new file mode 100644
index 0000000..1f88e66
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeNaiveTFIDFScoreScript.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+import org.elasticsearch.search.lookup.IndexFieldTerm;
+import org.elasticsearch.search.lookup.IndexField;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Map;
+
+public class NativeNaiveTFIDFScoreScript extends AbstractSearchScript {
+
+ public static final String NATIVE_NAIVE_TFIDF_SCRIPT_SCORE = "native_naive_tfidf_script_score";
+ String field = null;
+ String[] terms = null;
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeNaiveTFIDFScoreScript(params);
+ }
+ }
+
+ private NativeNaiveTFIDFScoreScript(Map<String, Object> params) {
+ // params is expected to hold a single entry: field name -> list of query terms
+ field = params.keySet().iterator().next();
+ @SuppressWarnings("unchecked")
+ ArrayList<String> arrayList = (ArrayList<String>) params.get(field);
+ terms = arrayList.toArray(new String[arrayList.size()]);
+ }
+
+ @Override
+ public Object run() {
+ float score = 0;
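+ // naive tf-idf: sum of tf(term) * docCount / df(term) over the query terms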
+ IndexField indexField = indexLookup().get(field);
+ for (int i = 0; i < terms.length; i++) {
+ IndexFieldTerm indexFieldTerm = indexField.get(terms[i]);
+ try {
+ if (indexFieldTerm.tf() != 0) {
+ score += indexFieldTerm.tf() * indexField.docCount() / indexFieldTerm.df();
+ }
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ return score;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumNoRecordScoreScript.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumNoRecordScoreScript.java
new file mode 100644
index 0000000..825b31e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumNoRecordScoreScript.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.search.lookup.IndexFieldTerm;
+import org.elasticsearch.search.lookup.IndexField;
+import org.elasticsearch.search.lookup.IndexLookup;
+import org.elasticsearch.search.lookup.TermPosition;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.ArrayList;
+import java.util.Map;
+
+public class NativePayloadSumNoRecordScoreScript extends AbstractSearchScript {
+
+ public static final String NATIVE_PAYLOAD_SUM_NO_RECORD_SCRIPT_SCORE = "native_payload_sum_no_record_script_score";
+ String field = null;
+ String[] terms = null;
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativePayloadSumNoRecordScoreScript(params);
+ }
+ }
+
+ private NativePayloadSumNoRecordScoreScript(Map<String, Object> params) {
+ // params is expected to hold a single entry: field name -> list of query terms
+ field = params.keySet().iterator().next();
+ @SuppressWarnings("unchecked")
+ ArrayList<String> arrayList = (ArrayList<String>) params.get(field);
+ terms = arrayList.toArray(new String[arrayList.size()]);
+ }
+
+ @Override
+ public Object run() {
+ float score = 0;
+ IndexField indexField = indexLookup().get(field);
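+ // FLAG_PAYLOADS only: payloads are read without caching ("no record")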
+ for (int i = 0; i < terms.length; i++) {
+ IndexFieldTerm indexFieldTerm = indexField.get(terms[i], IndexLookup.FLAG_PAYLOADS);
+ for (TermPosition pos : indexFieldTerm) {
+ score += pos.payloadAsFloat(0);
+ }
+ }
+ return score;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumScoreScript.java b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumScoreScript.java
new file mode 100644
index 0000000..0172c56
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumScoreScript.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.search.lookup.IndexFieldTerm;
+import org.elasticsearch.search.lookup.IndexField;
+import org.elasticsearch.search.lookup.IndexLookup;
+import org.elasticsearch.search.lookup.TermPosition;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.ArrayList;
+import java.util.Map;
+
+public class NativePayloadSumScoreScript extends AbstractSearchScript {
+
+ public static final String NATIVE_PAYLOAD_SUM_SCRIPT_SCORE = "native_payload_sum_script_score";
+ String field = null;
+ String[] terms = null;
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativePayloadSumScoreScript(params);
+ }
+ }
+
+ private NativePayloadSumScoreScript(Map<String, Object> params) {
+ // params is expected to hold a single entry: field name -> list of query terms
+ field = params.keySet().iterator().next();
+ @SuppressWarnings("unchecked")
+ ArrayList<String> arrayList = (ArrayList<String>) params.get(field);
+ terms = arrayList.toArray(new String[arrayList.size()]);
+ }
+
+ @Override
+ public Object run() {
+ float score = 0;
+ IndexField indexField = indexLookup().get(field);
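+ // FLAG_CACHE additionally records (caches) positions and payloads for reuse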
+ for (int i = 0; i < terms.length; i++) {
+ IndexFieldTerm indexFieldTerm = indexField.get(terms[i], IndexLookup.FLAG_PAYLOADS | IndexLookup.FLAG_CACHE);
+ for (TermPosition pos : indexFieldTerm) {
+ score += pos.payloadAsFloat(0);
+ }
+ }
+ return score;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/SuggestSearchBenchMark.java b/src/test/java/org/elasticsearch/benchmark/search/SuggestSearchBenchMark.java
new file mode 100644
index 0000000..3f7811c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/SuggestSearchBenchMark.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option;
+import org.elasticsearch.search.suggest.SuggestBuilder;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Benchmarks term suggestions against an index of generated prefix terms.
+ */
+public class SuggestSearchBenchMark {
+
+ public static void main(String[] args) throws Exception {
+ int SEARCH_ITERS = 200;
+
+ Settings settings = settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ }
+
+ Client client = nodes[0].client();
+ try {
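+ // minimal mapping: _source and _all disabled, one not_analyzed string field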
+ client.admin().indices().prepareCreate("test").setSettings(settings).addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("_all").field("enabled", false).endObject()
+ .startObject("_type").field("index", "no").endObject()
+ .startObject("_id").field("index", "no").endObject()
+ .startObject("properties")
+ .startObject("field").field("type", "string").field("index", "not_analyzed").field("omit_norms", true).endObject()
+ .endObject()
+ .endObject().endObject()).execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+
+ StopWatch stopWatch = new StopWatch().start();
+ long COUNT = SizeValue.parseSizeValue("10m").singles();
+ int BATCH = 100;
+ System.out.println("Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ char character = 'a';
+ int idCounter = 0;
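+ // index in batches; all docs in a batch share the same prefix character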
+ for (; i <= ITERS; i++) {
+ int termCounter = 0;
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(idCounter++)).source(source("prefix" + character + termCounter++)));
+ }
+ character++;
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("failures...");
+ }
+ }
+ System.out.println("Indexing took " + stopWatch.totalTime());
+
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("Count: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("Count: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+ }
+
+ System.out.println("Warming up...");
+ char startChar = 'a';
+ for (int i = 0; i <= 20; i++) {
+ String term = "prefix" + startChar;
+ SearchResponse response = client.prepareSearch()
+ .setQuery(prefixQuery("field", term))
+ .addSuggestion(SuggestBuilder.termSuggestion("field").field("field").text(term).suggestMode("always"))
+ .execute().actionGet();
+ if (response.getHits().totalHits() == 0) {
+ System.err.println("No hits");
+ continue;
+ }
+ startChar++;
+ }
+
+ System.out.println("Starting benchmarking suggestions.");
+ startChar = 'a';
+ long timeTaken = 0;
+ for (int i = 0; i < SEARCH_ITERS; i++) {
+ String term = "prefix" + startChar;
+ SearchResponse response = client.prepareSearch()
+ .setQuery(matchQuery("field", term))
+ .addSuggestion(SuggestBuilder.termSuggestion("field").text(term).field("field").suggestMode("always"))
+ .execute().actionGet();
+ timeTaken += response.getTookInMillis();
+ if (response.getSuggest() == null) {
+ System.err.println("No suggestions");
+ continue;
+ }
+ List<? extends Option> options = response.getSuggest().getSuggestion("field").getEntries().get(0).getOptions();
+ if (options == null || options.isEmpty()) {
+ System.err.println("No suggestions");
+ }
+ startChar++;
+ }
+
+ System.out.println("Avg time taken without filter " + (timeTaken / SEARCH_ITERS));
+
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private static XContentBuilder source(String nameValue) throws IOException {
+ return jsonBuilder().startObject()
+ .field("field", nameValue)
+ .endObject();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/aggregations/HistogramAggregationSearchBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/aggregations/HistogramAggregationSearchBenchmark.java
new file mode 100644
index 0000000..ae01a9e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/aggregations/HistogramAggregationSearchBenchmark.java
@@ -0,0 +1,311 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.aggregations;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.facet.FacetBuilder;
+
+import java.util.Date;
+import java.util.Random;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.search.facet.FacetBuilders.dateHistogramFacet;
+import static org.elasticsearch.search.facet.FacetBuilders.histogramFacet;
+
+/**
+ * Benchmarks histogram and date-histogram facets against the equivalent
+ * aggregations on numeric and date fields.
+ */
+public class HistogramAggregationSearchBenchmark {
+
+ static final long COUNT = SizeValue.parseSizeValue("20m").singles();
+ static final int BATCH = 1000;
+ static final int QUERY_WARMUP = 5;
+ static final int QUERY_COUNT = 20;
+ static final int NUMBER_OF_TERMS = 1000;
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = HistogramAggregationSearchBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder()
+ .clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node1")).node();
+
+ //Node clientNode = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+
+ Client client = node1.client();
+
+ long[] lValues = new long[NUMBER_OF_TERMS];
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ lValues[i] = i;
+ }
+
+ Random r = new Random();
+ try {
+ client.admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put(settings))
+ .addMapping("type1", jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("l_value")
+ .field("type", "long")
+ .endObject()
+ .startObject("i_value")
+ .field("type", "integer")
+ .endObject()
+ .startObject("s_value")
+ .field("type", "short")
+ .endObject()
+ .startObject("b_value")
+ .field("type", "byte")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + COUNT + "] ...");
+ long iters = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= iters; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+ final long value = lValues[r.nextInt(lValues.length)];
+ XContentBuilder source = jsonBuilder().startObject()
+ .field("id", Integer.valueOf(counter))
+ .field("l_value", value)
+ .field("i_value", (int) value)
+ .field("s_value", (short) value)
+ .field("b_value", (byte) value)
+ .field("date", new Date())
+ .endObject();
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
+ .source(source));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ client.admin().indices().prepareFlush("test").execute().actionGet();
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ if (client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount() != COUNT) {
+ throw new Error("--> mismatch on doc count: expected " + COUNT);
+ }
+ System.out.println("--> Number of docs in index: " + COUNT);
+
+ System.out.println("--> Warmup...");
+ // run just the child query, warm up first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet("l_value").field("l_value").interval(4))
+ .addFacet(histogramFacet("i_value").field("i_value").interval(4))
+ .addFacet(histogramFacet("s_value").field("s_value").interval(4))
+ .addFacet(histogramFacet("b_value").field("b_value").interval(4))
+ .addFacet(histogramFacet("date").field("date").interval(1000))
+ .addAggregation(histogram("l_value").field("l_value").interval(4))
+ .addAggregation(histogram("i_value").field("i_value").interval(4))
+ .addAggregation(histogram("s_value").field("s_value").interval(4))
+ .addAggregation(histogram("b_value").field("b_value").interval(4))
+ .addAggregation(histogram("date").field("date").interval(1000))
+ .execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Warmup took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+ System.out.println("--> Warmup DONE");
+
+ long totalQueryTime = 0;
+ for (String field : new String[] {"b_value", "s_value", "i_value", "l_value"}) {
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet(field).field(field).interval(4))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Facet (" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram(field).field(field).interval(4))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Aggregation (" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet(field).field(field).valueField(field).interval(4))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Facet (" + field + "/" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram(field).field(field).subAggregation(stats(field).field(field)).interval(4))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Aggregation (" + field + "/" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet("date").field("date").interval(1000))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Facet (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date").field("date").interval(1000))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Aggregation (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet("date").field("date").valueField("l_value").interval(1000))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Facet (date/l_value) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date").field("date").interval(1000).subAggregation(stats("stats").field("l_value")))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Aggregation (date/l_value) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(dateHistogramFacet("date").field("date").interval("day").mode(FacetBuilder.Mode.COLLECTOR))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Date Histogram Facet (mode/collector) (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(dateHistogramFacet("date").field("date").interval("day").mode(FacetBuilder.Mode.POST))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Date Histogram Facet (mode/post) (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ node1.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/aggregations/QueryFilterAggregationSearchBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/aggregations/QueryFilterAggregationSearchBenchmark.java
new file mode 100644
index 0000000..40345fe
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/aggregations/QueryFilterAggregationSearchBenchmark.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.aggregations;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.facet.FacetBuilder;
+import org.elasticsearch.search.facet.FacetBuilders;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+public class QueryFilterAggregationSearchBenchmark {
+
+ static final long COUNT = SizeValue.parseSizeValue("5m").singles();
+ static final int BATCH = 1000;
+ static final int QUERY_COUNT = 200;
+ static final int NUMBER_OF_TERMS = 200;
+
+ static Client client;
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 2)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = QueryFilterAggregationSearchBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder()
+ .clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node1")).node();
+ client = node1.client();
+
+ long[] lValues = new long[NUMBER_OF_TERMS];
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ lValues[i] = ThreadLocalRandom.current().nextLong();
+ }
+
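+ // presumably gives the node time to settle before the index is created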
+ Thread.sleep(10000);
+ try {
+ client.admin().indices().create(createIndexRequest("test")).actionGet();
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+
+ XContentBuilder builder = jsonBuilder().startObject();
+ builder.field("id", Integer.toString(counter));
+ builder.field("l_value", lValues[ThreadLocalRandom.current().nextInt(NUMBER_OF_TERMS)]);
+
+ builder.endObject();
+
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
+ .source(builder));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 100000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ if (client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount() != COUNT) {
+ throw new Error("--> mismatch on doc count: expected " + COUNT);
+ }
+ System.out.println("--> Number of docs in index: " + COUNT);
+
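+ // pick a value that exists in the index so every query matches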
+ final long anyValue = ((Number) client.prepareSearch().execute().actionGet().getHits().hits()[0].sourceAsMap().get("l_value")).longValue();
+
+ long totalQueryTime = 0;
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(termQuery("l_value", anyValue))
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Simple Query on first l_value " + totalQueryTime + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(termQuery("l_value", anyValue))
+ .addFacet(FacetBuilders.queryFacet("query").query(termQuery("l_value", anyValue)))
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Query facet first l_value " + totalQueryTime + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(termQuery("l_value", anyValue))
+ .addAggregation(AggregationBuilders.filter("filter").filter(FilterBuilders.termFilter("l_value", anyValue)))
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Filter agg first l_value " + totalQueryTime + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(termQuery("l_value", anyValue))
+ .addFacet(FacetBuilders.queryFacet("query").query(termQuery("l_value", anyValue)).global(true).mode(FacetBuilder.Mode.COLLECTOR))
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Query facet first l_value (global) (mode/collector) " + totalQueryTime + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(termQuery("l_value", anyValue))
+ .addFacet(FacetBuilders.queryFacet("query").query(termQuery("l_value", anyValue)).global(true).mode(FacetBuilder.Mode.POST))
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Query facet first l_value (global) (mode/post) " + totalQueryTime + "ms");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchBenchmark.java
new file mode 100644
index 0000000..c12f24f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchBenchmark.java
@@ -0,0 +1,367 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.aggregations;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.google.common.collect.Lists;
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+
+import java.util.List;
+import java.util.Random;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.search.facet.FacetBuilders.termsFacet;
+import static org.elasticsearch.search.facet.FacetBuilders.termsStatsFacet;
+
+/**
+ * Benchmarks terms facets against terms aggregations (and terms_stats facets
+ * against terms+stats aggregations) across single- and multi-valued fields.
+ */
+public class TermsAggregationSearchBenchmark {
+
+ static long COUNT = SizeValue.parseSizeValue("2m").singles();
+ static int BATCH = 1000;
+ static int QUERY_WARMUP = 10;
+ static int QUERY_COUNT = 100;
+ static int NUMBER_OF_TERMS = 200;
+ static int NUMBER_OF_MULTI_VALUE_TERMS = 10;
+ static int STRING_TERM_SIZE = 5;
+
+ static Client client;
+
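+ // abstracts over the facet and aggregation APIs so one measurement loop serves both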
+ private enum Method {
+ FACET {
+ @Override
+ SearchRequestBuilder addTermsAgg(SearchRequestBuilder builder, String name, String field, String executionHint) {
+ return builder.addFacet(termsFacet(name).field(field).executionHint(executionHint));
+ }
+
+ @Override
+ SearchRequestBuilder addTermsStatsAgg(SearchRequestBuilder builder, String name, String keyField, String valueField) {
+ return builder.addFacet(termsStatsFacet(name).keyField(keyField).valueField(valueField));
+ }
+ },
+ AGGREGATION {
+ @Override
+ SearchRequestBuilder addTermsAgg(SearchRequestBuilder builder, String name, String field, String executionHint) {
+ return builder.addAggregation(AggregationBuilders.terms(name).executionHint(executionHint).field(field));
+ }
+
+ @Override
+ SearchRequestBuilder addTermsStatsAgg(SearchRequestBuilder builder, String name, String keyField, String valueField) {
+ return builder.addAggregation(AggregationBuilders.terms(name).field(keyField).subAggregation(AggregationBuilders.stats("stats").field(valueField)));
+ }
+ };
+ abstract SearchRequestBuilder addTermsAgg(SearchRequestBuilder builder, String name, String field, String executionHint);
+ abstract SearchRequestBuilder addTermsStatsAgg(SearchRequestBuilder builder, String name, String keyField, String valueField);
+ }
+
+ public static void main(String[] args) throws Exception {
+ Random random = new Random();
+
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = TermsAggregationSearchBenchmark.class.getSimpleName();
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node" + i))
+ .node();
+ }
+
+ Node clientNode = nodeBuilder()
+ .clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+
+ client = clientNode.client();
+
+ long[] lValues = new long[NUMBER_OF_TERMS];
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ lValues[i] = ThreadLocalRandom.current().nextLong();
+ }
+ String[] sValues = new String[NUMBER_OF_TERMS];
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ sValues[i] = RandomStrings.randomAsciiOfLength(random, STRING_TERM_SIZE);
+ }
+
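+ // presumably gives the nodes time to settle before the index is created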
+ Thread.sleep(10000);
+ try {
+ client.admin().indices().create(createIndexRequest("test").mapping("type1", jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("s_value_dv")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("sm_value_dv")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("l_value_dv")
+ .field("type", "long")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("lm_value_dv")
+ .field("type", "long")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())).actionGet();
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+
+ XContentBuilder builder = jsonBuilder().startObject();
+ builder.field("id", Integer.toString(counter));
+ final String sValue = sValues[counter % sValues.length];
+ final long lValue = lValues[counter % lValues.length];
+ builder.field("s_value", sValue);
+ builder.field("l_value", lValue);
+ builder.field("s_value_dv", sValue);
+ builder.field("l_value_dv", lValue);
+
+ for (String field : new String[] {"sm_value", "sm_value_dv"}) {
+ builder.startArray(field);
+ for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
+ builder.value(sValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
+ }
+ builder.endArray();
+ }
+
+ for (String field : new String[] {"lm_value", "lm_value_dv"}) {
+ builder.startArray(field);
+ for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
+ builder.value(lValues[ThreadLocalRandom.current().nextInt(lValues.length)]);
+ }
+ builder.endArray();
+ }
+
+ builder.endObject();
+
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
+ .source(builder));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
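+ // use the live doc count so hit checks also hold when reusing an existing index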
+ COUNT = client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount();
+ System.out.println("--> Number of docs in index: " + COUNT);
+
+ List<StatsResult> stats = Lists.newArrayList();
+ stats.add(terms("terms_facet_s", Method.FACET, "s_value", null));
+ stats.add(terms("terms_facet_s_dv", Method.FACET, "s_value_dv", null));
+ stats.add(terms("terms_facet_map_s", Method.FACET, "s_value", "map"));
+ stats.add(terms("terms_facet_map_s_dv", Method.FACET, "s_value_dv", "map"));
+ stats.add(terms("terms_agg_s", Method.AGGREGATION, "s_value", null));
+ stats.add(terms("terms_agg_s_dv", Method.AGGREGATION, "s_value_dv", null));
+ stats.add(terms("terms_agg_map_s", Method.AGGREGATION, "s_value", "map"));
+ stats.add(terms("terms_agg_map_s_dv", Method.AGGREGATION, "s_value_dv", "map"));
+ stats.add(terms("terms_facet_l", Method.FACET, "l_value", null));
+ stats.add(terms("terms_facet_l_dv", Method.FACET, "l_value_dv", null));
+ stats.add(terms("terms_agg_l", Method.AGGREGATION, "l_value", null));
+ stats.add(terms("terms_agg_l_dv", Method.AGGREGATION, "l_value_dv", null));
+ stats.add(terms("terms_facet_sm", Method.FACET, "sm_value", null));
+ stats.add(terms("terms_facet_sm_dv", Method.FACET, "sm_value_dv", null));
+ stats.add(terms("terms_facet_map_sm", Method.FACET, "sm_value", "map"));
+ stats.add(terms("terms_facet_map_sm_dv", Method.FACET, "sm_value_dv", "map"));
+ stats.add(terms("terms_agg_sm", Method.AGGREGATION, "sm_value", null));
+ stats.add(terms("terms_agg_sm_dv", Method.AGGREGATION, "sm_value_dv", null));
+ stats.add(terms("terms_agg_map_sm", Method.AGGREGATION, "sm_value", "map"));
+ stats.add(terms("terms_agg_map_sm_dv", Method.AGGREGATION, "sm_value_dv", "map"));
+ stats.add(terms("terms_facet_lm", Method.FACET, "lm_value", null));
+ stats.add(terms("terms_facet_lm_dv", Method.FACET, "lm_value_dv", null));
+ stats.add(terms("terms_agg_lm", Method.AGGREGATION, "lm_value", null));
+ stats.add(terms("terms_agg_lm_dv", Method.AGGREGATION, "lm_value_dv", null));
+
+ stats.add(termsStats("terms_stats_facet_s_l", Method.FACET, "s_value", "l_value", null));
+ stats.add(termsStats("terms_stats_facet_s_l_dv", Method.FACET, "s_value_dv", "l_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_s_l", Method.AGGREGATION, "s_value", "l_value", null));
+ stats.add(termsStats("terms_stats_agg_s_l_dv", Method.AGGREGATION, "s_value_dv", "l_value_dv", null));
+ stats.add(termsStats("terms_stats_facet_s_lm", Method.FACET, "s_value", "lm_value", null));
+ stats.add(termsStats("terms_stats_facet_s_lm_dv", Method.FACET, "s_value_dv", "lm_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_s_lm", Method.AGGREGATION, "s_value", "lm_value", null));
+ stats.add(termsStats("terms_stats_agg_s_lm_dv", Method.AGGREGATION, "s_value_dv", "lm_value_dv", null));
+ stats.add(termsStats("terms_stats_facet_sm_l", Method.FACET, "sm_value", "l_value", null));
+ stats.add(termsStats("terms_stats_facet_sm_l_dv", Method.FACET, "sm_value_dv", "l_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_sm_l", Method.AGGREGATION, "sm_value", "l_value", null));
+ stats.add(termsStats("terms_stats_agg_sm_l_dv", Method.AGGREGATION, "sm_value_dv", "l_value_dv", null));
+
+ System.out.println("------------------ SUMMARY -------------------------------");
+ System.out.format("%25s%10s%10s\n", "name", "took", "millis");
+ for (StatsResult stat : stats) {
+ System.out.format("%25s%10s%10d\n", stat.name, TimeValue.timeValueMillis(stat.took), (stat.took / QUERY_COUNT));
+ }
+ System.out.println("------------------ SUMMARY -------------------------------");
+
+ clientNode.close();
+
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ static class StatsResult {
+ final String name;
+ final long took;
+
+ StatsResult(String name, long took) {
+ this.name = name;
+ this.took = took;
+ }
+ }
+
+ private static StatsResult terms(String name, Method method, String field, String executionHint) {
+ long totalQueryTime;
+
+ client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
+
+ System.out.println("--> Warmup (" + name + ")...");
+ // warm up first; this also reloads field data after the cache clear
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = method.addTermsAgg(client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(matchAllQuery()), name, field, executionHint)
+ .execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Loading (" + field + "): took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+ System.out.println("--> Warmup (" + name + ") DONE");
+
+
+ System.out.println("--> Running (" + name + ")...");
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = method.addTermsAgg(client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(matchAllQuery()), name, field, executionHint)
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Terms Agg (" + name + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
+ return new StatsResult(name, totalQueryTime);
+ }
+
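+ // Same measurement loop as terms(), but for a terms_stats facet/aggregation keyed on keyField with stats over valueField.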
+ private static StatsResult termsStats(String name, Method method, String keyField, String valueField, String executionHint) {
+ long totalQueryTime;
+
+ client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
+
+ System.out.println("--> Warmup (" + name + ")...");
+ // warm up the terms stats facet/aggregation first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = method.addTermsStatsAgg(client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(matchAllQuery()), name, keyField, valueField)
+ .execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Loading (" + name + "): took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+ System.out.println("--> Warmup (" + name + ") DONE");
+
+
+ System.out.println("--> Running (" + name + ")...");
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = method.addTermsStatsAgg(client.prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(matchAllQuery()), name, keyField, valueField)
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Terms stats agg (" + name + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
+ return new StatsResult(name, totalQueryTime);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchAndIndexingBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchAndIndexingBenchmark.java
new file mode 100644
index 0000000..ed75ddb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchAndIndexingBenchmark.java
@@ -0,0 +1,257 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.child;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Random;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.hasChildFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
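+ * Benchmark that bulk-indexes parent/child documents and then runs an indexing thread and a search thread concurrently until stdin receives input.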
+ *
+ */
+public class ChildSearchAndIndexingBenchmark {
+
+ static long COUNT = SizeValue.parseSizeValue("1m").singles();
+ static int CHILD_COUNT = 5;
+ static int BATCH = 100;
+ static int QUERY_COUNT = 50;
+ static String indexName = "test";
+ static Random random = new Random();
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = ChildSearchAndIndexingBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node1"))
+ .clusterName(clusterName)
+ .node();
+ Client client = node1.client();
+
+ client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ try {
+ client.admin().indices().create(createIndexRequest(indexName)).actionGet();
+ client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
+ .startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject()).execute().actionGet();
+ Thread.sleep(5000);
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + COUNT + "] parent document and [" + (COUNT * CHILD_COUNT) + " child documents");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+ request.add(Requests.indexRequest(indexName).type("parent").id(Integer.toString(counter))
+ .source(parentSource(Integer.toString(counter), "test" + counter)));
+ for (int k = 0; k < CHILD_COUNT; k++) {
+ request.add(Requests.indexRequest(indexName).type("child").id(Integer.toString(counter) + "_" + k)
+ .parent(Integer.toString(counter))
+ .source(childSource(Integer.toString(counter), "tag" + k)));
+ }
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) * (1 + CHILD_COUNT) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT * (1 + CHILD_COUNT))) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("--> Number of docs in index: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+
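+ // Run one indexing thread and one search thread concurrently; any input on stdin stops both.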
+ SearchThread searchThread = new SearchThread(client);
+ new Thread(searchThread).start();
+ IndexThread indexThread = new IndexThread(client);
+ new Thread(indexThread).start();
+
+ System.in.read();
+
+ indexThread.stop();
+ searchThread.stop();
+ client.close();
+ node1.close();
+ }
+
+ private static XContentBuilder parentSource(String id, String nameValue) throws IOException {
+ return jsonBuilder().startObject().field("id", id).field("name", nameValue).endObject();
+ }
+
+ private static XContentBuilder childSource(String id, String tag) throws IOException {
+ return jsonBuilder().startObject().field("id", id).field("tag", tag).endObject();
+ }
+
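+ // Continuously re-indexes the same parent/child documents (in-place updates), refreshing after each parent and reporting deleted-doc counts every 500 parents.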
+ static class IndexThread implements Runnable {
+
+ private final Client client;
+ private volatile boolean run = true;
+
+ IndexThread(Client client) {
+ this.client = client;
+ }
+
+ @Override
+ public void run() {
+ while (run) {
+ for (int i = 1; run && i < COUNT; i++) {
+ try {
+ client.prepareIndex(indexName, "parent", Integer.toString(i))
+ .setSource(parentSource(Integer.toString(i), "test" + i)).execute().actionGet();
+ for (int j = 0; j < CHILD_COUNT; j++) {
+ client.prepareIndex(indexName, "child", Integer.toString(i) + "_" + j)
+ .setParent(Integer.toString(i))
+ .setSource(childSource(Integer.toString(i), "tag" + j)).execute().actionGet();
+ }
+ client.admin().indices().prepareRefresh(indexName).execute().actionGet();
+ Thread.sleep(100);
+ if (i % 500 == 0) {
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .clear().setIndices(true).execute().actionGet();
+ System.out.println("Deleted docs: " + statsResponse.getAt(0).getIndices().getDocs().getDeleted());
+ }
+ } catch (Throwable e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+
+ public void stop() {
+ run = false;
+ }
+
+ }
+
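+ // Repeatedly times has_child filter queries (term and match_all variants) and reports JVM heap usage.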
+ static class SearchThread implements Runnable {
+
+ private final Client client;
+ private volatile boolean run = true;
+
+ SearchThread(Client client) {
+ this.client = client;
+ }
+
+ @Override
+ public void run() {
+ while (run) {
+ try {
+ long totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildFilter("child", termQuery("tag", "tag" + random.nextInt(CHILD_COUNT)))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+// System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + COUNT + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter with term filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildFilter("child", matchAllQuery())
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ long expected = (COUNT / BATCH) * BATCH;
+ if (searchResponse.getHits().totalHits() != expected) {
+// System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + expected + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter with match_all child query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+ Thread.sleep(1000);
+ } catch (Throwable e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ public void stop() {
+ run = false;
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchBenchmark.java
new file mode 100644
index 0000000..b7432bf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchBenchmark.java
@@ -0,0 +1,435 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.child;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
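+ * Benchmark for parent/child search: has_child and has_parent filters/queries and top_children, each warmed up and then averaged over repeated runs.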
+ *
+ */
+public class ChildSearchBenchmark {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = ChildSearchBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node1")).node();
+ Client client = node1.client();
+
+ long COUNT = SizeValue.parseSizeValue("10m").singles();
+ int CHILD_COUNT = 5;
+ int BATCH = 100;
+ int QUERY_WARMUP = 20;
+ int QUERY_COUNT = 50;
+ String indexName = "test";
+
+ client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ try {
+ client.admin().indices().create(createIndexRequest(indexName)).actionGet();
+ client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
+ .startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject()).execute().actionGet();
+ Thread.sleep(5000);
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + COUNT + "] parent document and [" + (COUNT * CHILD_COUNT) + " child documents");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+ request.add(Requests.indexRequest(indexName).type("parent").id(Integer.toString(counter))
+ .source(parentSource(counter, "test" + counter)));
+ for (int k = 0; k < CHILD_COUNT; k++) {
+ request.add(Requests.indexRequest(indexName).type("child").id(Integer.toString(counter) + "_" + k)
+ .parent(Integer.toString(counter))
+ .source(childSource(counter, "tag" + k)));
+ }
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) * (1 + CHILD_COUNT) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT * (1 + CHILD_COUNT))) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("--> Number of docs in index: " + client.prepareCount(indexName).setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ System.out.println("--> Running just child query");
+ // run just the child query, warm up first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(termQuery("child.tag", "tag1")).execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Warmup took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+
+ long totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(termQuery("child.tag", "tag1")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Just Child Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ // run parent child constant query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildFilter("child", termQuery("tag", "tag1"))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + COUNT + "]");
+ }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildFilter("child", termQuery("tag", "tag1"))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + COUNT + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ System.out.println("--> Running has_child filter with match_all child query");
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildFilter("child", matchAllQuery())
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ long expected = (COUNT / BATCH) * BATCH;
+ if (searchResponse.getHits().totalHits() != expected) {
+ System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + expected + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter with match_all child query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(
+ filteredQuery(matchAllQuery(), hasChildFilter("child", termQuery("id", Integer.toString(j + 1))))
+ ).execute().actionGet();
+ long expected = 1;
+ if (searchResponse.getHits().totalHits() != expected) {
+ System.err.println("mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter with single parent match Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ double expected = Math.pow((j + 1), 3) * CHILD_COUNT;
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(filteredQuery(matchAllQuery(), hasChildFilter("child", constantScoreQuery(rangeFilter("num").lte(expected)))))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != expected) {
+ System.err.println("mismatch on hits: " + searchResponse.getHits().totalHits() + " != " + expected);
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter with exponential parent results Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ // run parent child constant query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasParentFilter("parent", termQuery("name", "test1"))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != CHILD_COUNT) {
+ System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + CHILD_COUNT + "]");
+ }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasParentFilter("parent", termQuery("name", "test1"))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != CHILD_COUNT) {
+ System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + CHILD_COUNT + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_parent filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ System.out.println("--> Running has_parent filter with match_all parent query ");
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(filteredQuery(
+ matchAllQuery(),
+ hasParentFilter("parent", matchAllQuery())
+ ))
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != COUNT * CHILD_COUNT) {
+ System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + (COUNT * CHILD_COUNT) + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_parent filter with match_all parent query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+ System.out.println("--> Running top_children query");
+ // run parent child score query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(topChildrenQuery("child", termQuery("tag", "tag1"))).execute().actionGet();
+ // we expect to have mismatch on hits here
+// if (searchResponse.hits().totalHits() != COUNT) {
+// System.err.println("mismatch on hits");
+// }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(topChildrenQuery("child", termQuery("tag", "tag1"))).execute().actionGet();
+ // we expect to have mismatch on hits here
+// if (searchResponse.hits().totalHits() != COUNT) {
+// System.err.println("mismatch on hits");
+// }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> top_children Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ System.out.println("--> Running top_children query, with match_all as child query");
+ // run parent child score query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(topChildrenQuery("child", matchAllQuery())).execute().actionGet();
+ // we expect to have mismatch on hits here
+// if (searchResponse.hits().totalHits() != COUNT) {
+// System.err.println("mismatch on hits");
+// }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(topChildrenQuery("child", matchAllQuery())).execute().actionGet();
+ // we expect to have mismatch on hits here
+// if (searchResponse.hits().totalHits() != COUNT) {
+// System.err.println("mismatch on hits");
+// }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> top_children, with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).setIndices(true).execute().actionGet();
+
+ System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ System.out.println("--> Running has_child query with score type");
+ // run parent child score query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", termQuery("tag", "tag1")).scoreType("max")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("mismatch on hits");
+ }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", termQuery("tag", "tag1")).scoreType("max")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", matchAllQuery()).scoreType("max")).execute().actionGet();
+ long expected = (COUNT / BATCH) * BATCH;
+ if (searchResponse.getHits().totalHits() != expected) {
+ System.err.println("mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child query with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", termQuery("id", Integer.toString(j + 1))).scoreType("max")).execute().actionGet();
+ long expected = 1;
+ if (searchResponse.getHits().totalHits() != expected) {
+ System.err.println("mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child query with single parent match Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ double expected = Math.pow((j + 1), 3) * CHILD_COUNT;
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", constantScoreQuery(rangeFilter("num").lte(expected))).scoreType("max")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != expected) {
+ System.err.println("mismatch on hits: " + searchResponse.getHits().totalHits() + " != " + expected);
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child query with exponential parent results Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ /*System.out.println("--> Running has_parent query with score type");
+ // run parent child score query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasParentQuery("parent", termQuery("name", "test1")).scoreType("score")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != CHILD_COUNT) {
+ System.err.println("mismatch on hits");
+ }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasParentQuery("parent", termQuery("name", "test1")).scoreType("score")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != CHILD_COUNT) {
+ System.err.println("mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_parent Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasParentQuery("parent", matchAllQuery()).scoreType("score")).execute().actionGet();
+ if (searchResponse.getHits().totalHits() != 5000000) {
+ System.err.println("mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_parent query with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");*/
+
+ System.gc();
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).setIndices(true).execute().actionGet();
+
+ System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ client.close();
+ node1.close();
+ }
+
+ private static XContentBuilder parentSource(int id, String nameValue) throws IOException {
+ return jsonBuilder().startObject().field("id", Integer.toString(id)).field("num", id).field("name", nameValue).endObject();
+ }
+
+ private static XContentBuilder childSource(int id, String tag) throws IOException {
+ return jsonBuilder().startObject().field("id", Integer.toString(id)).field("num", id).field("tag", tag).endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchShortCircuitBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchShortCircuitBenchmark.java
new file mode 100644
index 0000000..398c9a9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchShortCircuitBenchmark.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.child;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.hasChildFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
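+ * Benchmark for has_child filters/queries where each query matches a known, power-of-two number of children.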
+ *
+ */
+public class ChildSearchShortCircuitBenchmark {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = ChildSearchShortCircuitBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node1"))
+ .node();
+ Client client = node1.client();
+
+ long PARENT_COUNT = SizeValue.parseSizeValue("10M").singles();
+ int BATCH = 100;
+ int QUERY_WARMUP = 5;
+ int QUERY_COUNT = 25;
+ String indexName = "test";
+
+ client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ try {
+ client.admin().indices().create(createIndexRequest(indexName)).actionGet();
+ client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
+ .startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject()).execute().actionGet();
+ Thread.sleep(5000);
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + PARENT_COUNT + "] parent document and some child documents");
+ long ITERS = PARENT_COUNT / BATCH;
+ int i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+ request.add(Requests.indexRequest(indexName).type("parent").id(Integer.toString(counter))
+ .source(parentSource(counter)));
+
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) + "parent docs; took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+
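+ // Index children in power-of-two buckets: for each i in 1,2,4,... index i children with field2 == i under parents 1..i, so a query on field2 == i matches exactly i children (and i parents).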
+ int id = 0;
+ for (i = 1; i <= PARENT_COUNT; i *= 2) {
+ int parentId = 1;
+ for (int j = 0; j < i; j++) {
+ client.prepareIndex(indexName, "child", Integer.toString(id++))
+ .setParent(Integer.toString(parentId++))
+ .setSource(childSource(i))
+ .execute().actionGet();
+ }
+ }
+
+ System.out.println("--> Indexing took " + stopWatch.totalTime());
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("--> Number of docs in index: " + client.prepareCount(indexName).setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ System.out.println("--> Running just child query");
+ // run just the child query, warm up first
+ for (int i = 1; i <= 10000; i *= 2) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(matchQuery("child.field2", i)).execute().actionGet();
+ System.out.println("--> Warmup took["+ i +"]: " + searchResponse.getTook());
+ if (searchResponse.getHits().totalHits() != i) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ // run parent child constant query
+ for (int j = 1; j < QUERY_WARMUP; j *= 2) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ hasChildQuery("child", matchQuery("field2", j))
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != j) {
+ System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + PARENT_COUNT + "]");
+ }
+ }
+
+ for (int i = 1; i < PARENT_COUNT; i *= 2) {
+ long totalQueryTime = 0; // per-bucket total, so the printed average covers this i only
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(filteredQuery(matchAllQuery(), hasChildFilter("child", matchQuery("field2", i))))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != i) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter " + i +" Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+ }
+
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).setIndices(true).execute().actionGet();
+
+ System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ for (int i = 1; i < PARENT_COUNT; i *= 2) {
+ long totalQueryTime = 0; // reset per bucket so the printed average covers this i only
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(hasChildQuery("child", matchQuery("field2", i)).scoreType("max"))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != i) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child query " + i +" Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+ }
+
+ System.gc();
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).setIndices(true).execute().actionGet();
+
+ System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ client.close();
+ node1.close();
+ }
+
+ private static XContentBuilder parentSource(int val) throws IOException {
+ return jsonBuilder().startObject().field("field1", Integer.toString(val)).endObject();
+ }
+
+ private static XContentBuilder childSource(int val) throws IOException {
+ return jsonBuilder().startObject().field("field2", Integer.toString(val)).endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/geo/GeoDistanceSearchBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/geo/GeoDistanceSearchBenchmark.java
new file mode 100644
index 0000000..c39bf8f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/geo/GeoDistanceSearchBenchmark.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.geo;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.geoDistanceFilter;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+/**
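+ * Benchmark comparing geo_distance filter performance across distance algorithms (ARC, SLOPPY_ARC, PLANE) and optimize_bbox modes (memory, indexed, none).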
+ */
+public class GeoDistanceSearchBenchmark {
+
+ public static void main(String[] args) throws Exception {
+
+ Node node = NodeBuilder.nodeBuilder().clusterName(GeoDistanceSearchBenchmark.class.getSimpleName()).node();
+ Client client = node.client();
+
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("Failed to wait for green status, bailing");
+ System.exit(1);
+ }
+
+ final long NUM_DOCS = SizeValue.parseSizeValue("1m").singles();
+ final long NUM_WARM = 50;
+ final long NUM_RUNS = 100;
+
+ if (client.admin().indices().prepareExists("test").execute().actionGet().isExists()) {
+ System.out.println("Found an index, count: " + client.prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount());
+ } else {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client.admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", mapping)
+ .execute().actionGet();
+
+ System.err.println("--> Indexing [" + NUM_DOCS + "]");
+ for (long i = 0; i < NUM_DOCS; ) {
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "New York")
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 5.286 km
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "Times Square")
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 0.4621 km
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "Tribeca")
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 1.258 km
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "Soho")
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 8.572 km
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "Brooklyn")
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
+ .endObject()).execute().actionGet();
+
+ if ((i % 10000) == 0) {
+ System.err.println("--> indexed " + i);
+ }
+ }
+ System.err.println("Done indexed");
+ client.admin().indices().prepareFlush("test").execute().actionGet();
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ }
+
+ System.err.println("--> Warming up (ARC) - optimize_bbox");
+ long start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.ARC, "memory");
+ }
+ long totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (ARC) - optimize_bbox (memory) " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (ARC) - optimize_bbox (memory)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.ARC, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (ARC) - optimize_bbox " + (totalTime / NUM_RUNS) + "ms");
+
+ System.err.println("--> Warming up (ARC) - optimize_bbox (indexed)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.ARC, "indexed");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (ARC) - optimize_bbox (indexed) " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (ARC) - optimize_bbox (indexed)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.ARC, "indexed");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (ARC) - optimize_bbox (indexed) " + (totalTime / NUM_RUNS) + "ms");
+
+
+ System.err.println("--> Warming up (ARC) - no optimize_bbox");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.ARC, "none");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (ARC) - no optimize_bbox " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (ARC) - no optimize_bbox");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.ARC, "none");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (ARC) - no optimize_bbox " + (totalTime / NUM_RUNS) + "ms");
+
+ System.err.println("--> Warming up (SLOPPY_ARC)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.SLOPPY_ARC, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (SLOPPY_ARC) " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (SLOPPY_ARC)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.SLOPPY_ARC, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (SLOPPY_ARC) " + (totalTime / NUM_RUNS) + "ms");
+
+ System.err.println("--> Warming up (PLANE)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.PLANE, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (PLANE) " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (PLANE)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.PLANE, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (PLANE) " + (totalTime / NUM_RUNS) + "ms");
+
+ node.close();
+ }
+
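+ // One count search filtered to within 2km of downtown New York, using the given distance algorithm and optimize_bbox mode.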
+ public static void run(Client client, GeoDistance geoDistance, String optimizeBbox) {
+ client.prepareSearch() // from NY
+ .setSearchType(SearchType.COUNT)
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location")
+ .distance("2km")
+ .optimizeBbox(optimizeBbox)
+ .geoDistance(geoDistance)
+ .point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/search/nested/NestedSearchBenchMark.java b/src/test/java/org/elasticsearch/benchmark/search/nested/NestedSearchBenchMark.java
new file mode 100644
index 0000000..d084261
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/search/nested/NestedSearchBenchMark.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.nested;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
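+ * Benchmark for sorting search results on a field inside nested objects (avg sort mode, alternating sort order).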
+ */
+public class NestedSearchBenchMark {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put("gateway.type", "local")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ Node node1 = nodeBuilder()
+ .settings(settingsBuilder().put(settings).put("name", "node1"))
+ .node();
+ Client client = node1.client();
+
+ int count = (int) SizeValue.parseSizeValue("1m").singles();
+ int nestedCount = 10;
+ int rootDocs = count / nestedCount;
+ int batch = 100;
+ int queryWarmup = 5;
+ int queryCount = 500;
+ String indexName = "test";
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth()
+ .setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+
+ try {
+ client.admin().indices().prepareCreate(indexName)
+ .addMapping("type", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("field1")
+ .field("type", "integer")
+ .endObject()
+ .startObject("field2")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("field3")
+ .field("type", "integer")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+ clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + rootDocs + "] root documents and [" + (rootDocs * nestedCount) + "] nested objects");
+ long ITERS = rootDocs / batch;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < batch; j++) {
+ counter++;
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject()
+ .field("field1", counter)
+ .startArray("field2");
+ for (int k = 0; k < nestedCount; k++) {
+ doc = doc.startObject()
+ .field("field3", k)
+ .endObject();
+ }
+ doc = doc.endArray();
+ request.add(
+ Requests.indexRequest(indexName).type("type").id(Integer.toString(counter)).source(doc)
+ );
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * batch) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * batch) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (count * (1 + nestedCount))) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("--> Number of docs in index: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ System.out.println("--> Running match_all with sorting on nested field");
+ // warm up the sorted match_all query first
+ for (int j = 0; j < queryWarmup; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("field2.field3")
+ .setNestedPath("field2")
+ .sortMode("avg")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Warmup took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != rootDocs) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+
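+ // measured run: alternate ascending/descending sorts on the nested field and average the reported took times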
+ long totalQueryTime = 0;
+ for (int j = 0; j < queryCount; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("field2.field3")
+ .setNestedPath("field2")
+ .sortMode("avg")
+ .order(j % 2 == 0 ? SortOrder.ASC : SortOrder.DESC)
+ )
+ .execute().actionGet();
+
+ if (searchResponse.getHits().totalHits() != rootDocs) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Sorting by nested fields took: " + (totalQueryTime / queryCount) + "ms");
+
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/stress/NodesStressTest.java b/src/test/java/org/elasticsearch/benchmark/stress/NodesStressTest.java
new file mode 100644
index 0000000..fab0166
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/stress/NodesStressTest.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.stress;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.node.Node;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.queryFilter;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+
+/**
+ * Standalone stress test that runs concurrent Indexer and Searcher threads
+ * against several in-JVM nodes after a warmup phase.
+ */
+public class NodesStressTest {
+
+ private Node[] nodes;
+
+ private int numberOfNodes = 2;
+
+ private Client[] clients;
+
+ private AtomicLong idGenerator = new AtomicLong();
+
+ private int fieldNumLimit = 50;
+
+ private long searcherIterations = 10;
+ private Searcher[] searcherThreads = new Searcher[1];
+
+ private long indexIterations = 10;
+ private Indexer[] indexThreads = new Indexer[1];
+
+ private TimeValue sleepAfterDone = TimeValue.timeValueMillis(0);
+ private TimeValue sleepBeforeClose = TimeValue.timeValueMillis(0);
+
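+ // coordination: barrier1 aligns thread startup, barrier2 releases the workers once timing has started, latch tracks completion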
+ private CountDownLatch latch;
+ private CyclicBarrier barrier1;
+ private CyclicBarrier barrier2;
+
+ public NodesStressTest() {
+ }
+
+ public NodesStressTest numberOfNodes(int numberOfNodes) {
+ this.numberOfNodes = numberOfNodes;
+ return this;
+ }
+
+ public NodesStressTest fieldNumLimit(int fieldNumLimit) {
+ this.fieldNumLimit = fieldNumLimit;
+ return this;
+ }
+
+ public NodesStressTest searchIterations(int searchIterations) {
+ this.searcherIterations = searchIterations;
+ return this;
+ }
+
+ public NodesStressTest searcherThreads(int numberOfSearcherThreads) {
+ searcherThreads = new Searcher[numberOfSearcherThreads];
+ return this;
+ }
+
+ public NodesStressTest indexIterations(long indexIterations) {
+ this.indexIterations = indexIterations;
+ return this;
+ }
+
+ public NodesStressTest indexThreads(int numberOfWriterThreads) {
+ indexThreads = new Indexer[numberOfWriterThreads];
+ return this;
+ }
+
+ public NodesStressTest sleepAfterDone(TimeValue time) {
+ this.sleepAfterDone = time;
+ return this;
+ }
+
+ public NodesStressTest sleepBeforeClose(TimeValue time) {
+ this.sleepBeforeClose = time;
+ return this;
+ }
+
+ public NodesStressTest build(Settings settings) throws Exception {
+ settings = settingsBuilder()
+// .put("index.refresh_interval", 1, TimeUnit.SECONDS)
+ .put(SETTING_NUMBER_OF_SHARDS, 5)
+ .put(SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(settings)
+ .build();
+
+ nodes = new Node[numberOfNodes];
+ clients = new Client[numberOfNodes];
+ for (int i = 0; i < numberOfNodes; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ clients[i] = nodes[i].client();
+ }
+
+ for (int i = 0; i < searcherThreads.length; i++) {
+ searcherThreads[i] = new Searcher(i);
+ }
+ for (int i = 0; i < indexThreads.length; i++) {
+ indexThreads[i] = new Indexer(i);
+ }
+
+ latch = new CountDownLatch(1);
+ barrier1 = new CyclicBarrier(2);
+ barrier2 = new CyclicBarrier(2);
+ // warmup
+ StopWatch stopWatch = new StopWatch().start();
+ Indexer warmup = new Indexer(-1).max(10000);
+ warmup.start();
+ barrier1.await();
+ barrier2.await();
+ latch.await();
+ stopWatch.stop();
+ System.out.println("Done Warmup, took [" + stopWatch.totalTime() + "]");
+
+ latch = new CountDownLatch(searcherThreads.length + indexThreads.length);
+ barrier1 = new CyclicBarrier(searcherThreads.length + indexThreads.length + 1);
+ barrier2 = new CyclicBarrier(searcherThreads.length + indexThreads.length + 1);
+
+ return this;
+ }
+
+ public void start() throws Exception {
+ for (Thread t : searcherThreads) {
+ t.start();
+ }
+ for (Thread t : indexThreads) {
+ t.start();
+ }
+ barrier1.await();
+
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+
+ barrier2.await();
+
+ latch.await();
+ stopWatch.stop();
+
+ System.out.println("Done, took [" + stopWatch.totalTime() + "]");
+ System.out.println("Sleeping before close: " + sleepBeforeClose);
+ Thread.sleep(sleepBeforeClose.millis());
+
+ for (Client client : clients) {
+ client.close();
+ }
+ for (Node node : nodes) {
+ node.close();
+ }
+
+ System.out.println("Sleeping before exit: " + sleepBeforeClose);
+ Thread.sleep(sleepAfterDone.millis());
+ }
+
+ class Searcher extends Thread {
+ final int id;
+ long counter = 0;
+ long max = searcherIterations;
+
+ Searcher(int id) {
+ super("Searcher" + id);
+ this.id = id;
+ }
+
+ @Override
+ public void run() {
+ try {
+ barrier1.await();
+ barrier2.await();
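+ // each iteration picks a client round-robin and runs a term query on "num" (bounded by fieldNumLimit) wrapped in a constant-score filter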
+ for (; counter < max; counter++) {
+ Client client = client(counter);
+ QueryBuilder query = termQuery("num", counter % fieldNumLimit);
+ query = constantScoreQuery(queryFilter(query));
+
+ SearchResponse search = client.search(searchRequest()
+ .source(searchSource().query(query)))
+ .actionGet();
+// System.out.println("Got search response, hits [" + search.hits().totalHits() + "]");
+ }
+ } catch (Exception e) {
+ System.err.println("Failed to search:");
+ e.printStackTrace();
+ } finally {
+ latch.countDown();
+ }
+ }
+ }
+
+ class Indexer extends Thread {
+
+ final int id;
+ long counter = 0;
+ long max = indexIterations;
+
+ Indexer(int id) {
+ super("Indexer" + id);
+ this.id = id;
+ }
+
+ Indexer max(int max) {
+ this.max = max;
+ return this;
+ }
+
+ @Override
+ public void run() {
+ try {
+ barrier1.await();
+ barrier2.await();
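+ // each iteration indexes a small document with a globally unique id, spreading requests across the node clients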
+ for (; counter < max; counter++) {
+ Client client = client(counter);
+ long id = idGenerator.incrementAndGet();
+ client.index(Requests.indexRequest().index("test").type("type1").id(Long.toString(id))
+ .source(XContentFactory.jsonBuilder().startObject()
+ .field("num", id % fieldNumLimit)
+ .endObject()))
+ .actionGet();
+ }
+ System.out.println("Indexer [" + id + "]: Done");
+ } catch (Exception e) {
+ System.err.println("Failed to index:");
+ e.printStackTrace();
+ } finally {
+ latch.countDown();
+ }
+ }
+ }
+
+ private Client client(long i) {
+ return clients[((int) (i % clients.length))];
+ }
+
+ public static void main(String[] args) throws Exception {
+ NodesStressTest test = new NodesStressTest()
+ .numberOfNodes(2)
+ .indexThreads(5)
+ .indexIterations(10 * 1000)
+ .searcherThreads(5)
+ .searchIterations(10 * 1000)
+ .sleepBeforeClose(TimeValue.timeValueMinutes(10))
+ .sleepAfterDone(TimeValue.timeValueMinutes(10))
+ .build(EMPTY_SETTINGS);
+
+ test.start();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadBulkStress.java b/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadBulkStress.java
new file mode 100644
index 0000000..e36f645
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadBulkStress.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.stress;
+
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+import java.util.Random;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Single-threaded bulk indexing stress test against a local node, reporting
+ * indexing throughput.
+ */
+public class SingleThreadBulkStress {
+
+ public static void main(String[] args) throws Exception {
+ Random random = new Random();
+
+ int shardsCount = Integer.parseInt(System.getProperty("es.shards", "1"));
+ int replicaCount = Integer.parseInt(System.getProperty("es.replica", "1"));
+
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "1s")
+ .put("index.merge.async", true)
+ .put("index.translog.flush_threshold_ops", 5000)
+ .put("gateway.type", "none")
+ .put(SETTING_NUMBER_OF_SHARDS, shardsCount)
+ .put(SETTING_NUMBER_OF_REPLICAS, replicaCount)
+ .build();
+
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ }
+
+ //Node client = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+ Node client = nodes[0];
+
+ Client client1 = client.client();
+
+ Thread.sleep(1000);
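+ // create the index up front; the mapping disables _source and _all and uses a single not_analyzed field to minimize overhead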
+ client1.admin().indices().prepareCreate("test").setSettings(settings).addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("_all").field("enabled", false).endObject()
+ .startObject("_type").field("index", "no").endObject()
+ .startObject("_id").field("index", "no").endObject()
+ .startObject("properties")
+ .startObject("field").field("type", "string").field("index", "not_analyzed").field("omit_norms", true).endObject()
+// .startObject("field").field("index", "analyzed").field("omit_norms", false).endObject()
+ .endObject()
+ .endObject().endObject()).execute().actionGet();
+ Thread.sleep(5000);
+
+ StopWatch stopWatch = new StopWatch().start();
+ long COUNT = SizeValue.parseSizeValue("2m").singles();
+ int BATCH = 500;
+ System.out.println("Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
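+ // bulk-index COUNT documents in batches of BATCH, reporting progress every 10k documents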
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client1.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter)).source(source(Integer.toString(counter), "test" + counter)));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) COUNT) / stopWatch.totalTime().secondsFrac()));
+
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("Count: " + client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ client.close();
+
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private static XContentBuilder source(String id, String nameValue) throws IOException {
+ return jsonBuilder().startObject().field("field", nameValue).endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadIndexingStress.java b/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadIndexingStress.java
new file mode 100644
index 0000000..8250f88
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadIndexingStress.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.stress;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Single-threaded indexing stress test that repeatedly overwrites documents
+ * within a small id range, reporting throughput.
+ */
+public class SingleThreadIndexingStress {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "1s")
+ .put("index.merge.async", true)
+ .put("index.translog.flush_threshold_ops", 5000)
+ .put("gateway.type", "none")
+ .put(SETTING_NUMBER_OF_SHARDS, 2)
+ .put(SETTING_NUMBER_OF_REPLICAS, 1)
+ .build();
+
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ }
+
+ Node client = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+
+ Client client1 = client.client();
+
+ Thread.sleep(1000);
+ client1.admin().indices().create(createIndexRequest("test")).actionGet();
+ Thread.sleep(5000);
+
+ StopWatch stopWatch = new StopWatch().start();
+ int COUNT = 200000;
+ int ID_RANGE = 100;
+ System.out.println("Indexing [" + COUNT + "] ...");
+ int i = 1;
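+ // ids cycle through ID_RANGE, so after the first pass most operations overwrite existing documents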
+ for (; i <= COUNT; i++) {
+// client1.admin().cluster().preparePingSingle("test", "type1", Integer.toString(i)).execute().actionGet();
+ client1.prepareIndex("test", "type1").setId(Integer.toString(i % ID_RANGE)).setSource(source(Integer.toString(i), "test" + i))
+ .setCreate(false).execute().actionGet();
+ if ((i % 10000) == 0) {
+ System.out.println("Indexed " + i + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) COUNT) / stopWatch.totalTime().secondsFrac()));
+
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("Count: " + client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ client.close();
+
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private static XContentBuilder source(String id, String nameValue) throws IOException {
+ long time = System.currentTimeMillis();
+ return jsonBuilder().startObject()
+ .field("id", id)
+// .field("numeric1", time)
+// .field("numeric2", time)
+// .field("numeric3", time)
+// .field("numeric4", time)
+// .field("numeric5", time)
+// .field("numeric6", time)
+// .field("numeric7", time)
+// .field("numeric8", time)
+// .field("numeric9", time)
+// .field("numeric10", time)
+ .field("name", nameValue)
+ .endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/time/SimpleTimeBenchmark.java b/src/test/java/org/elasticsearch/benchmark/time/SimpleTimeBenchmark.java
new file mode 100644
index 0000000..37b20bc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/time/SimpleTimeBenchmark.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.time;
+
+import org.elasticsearch.common.StopWatch;
+
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * Micro-benchmark measuring System.currentTimeMillis() (or System.nanoTime())
+ * call throughput, single-threaded and under thread contention.
+ */
+public class SimpleTimeBenchmark {
+
+ private static boolean USE_NANO_TIME = false;
+ private static long NUMBER_OF_ITERATIONS = 1000000;
+ private static int NUMBER_OF_THREADS = 100;
+
+ public static void main(String[] args) throws Exception {
+ StopWatch stopWatch = new StopWatch().start();
+ System.out.println("Running " + NUMBER_OF_ITERATIONS);
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ System.currentTimeMillis();
+ }
+ System.out.println("Took " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));
+
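+ // repeat with NUMBER_OF_THREADS concurrent threads, calling nanoTime() or currentTimeMillis() per USE_NANO_TIME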
+ System.out.println("Running using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ if (USE_NANO_TIME) {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ System.nanoTime();
+ }
+ } else {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ System.currentTimeMillis();
+ }
+ }
+ latch.countDown();
+ }
+ });
+ }
+ stopWatch = new StopWatch().start();
+ for (Thread thread : threads) {
+ thread.start();
+ }
+ latch.await();
+ stopWatch.stop();
+ System.out.println("Took " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageRequest.java b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageRequest.java
new file mode 100644
index 0000000..2978c5c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageRequest.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+/**
+ * Transport request carrying an id and an opaque byte payload, used by the
+ * transport benchmarks.
+ */
+public class BenchmarkMessageRequest extends TransportRequest {
+
+ long id;
+ byte[] payload;
+
+ public BenchmarkMessageRequest(long id, byte[] payload) {
+ this.id = id;
+ this.payload = payload;
+ }
+
+ public BenchmarkMessageRequest() {
+ }
+
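+ // wire format: an 8-byte id followed by a vint-length-prefixed payload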
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readLong();
+ payload = new byte[in.readVInt()];
+ in.readFully(payload);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(id);
+ out.writeVInt(payload.length);
+ out.writeBytes(payload);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageResponse.java b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageResponse.java
new file mode 100644
index 0000000..7a7e3d9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageResponse.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportResponse;
+
+import java.io.IOException;
+
+/**
+ * Transport response that echoes back the id and payload of a
+ * BenchmarkMessageRequest.
+ */
+public class BenchmarkMessageResponse extends TransportResponse {
+
+ long id;
+ byte[] payload;
+
+ public BenchmarkMessageResponse(BenchmarkMessageRequest request) {
+ this.id = request.id;
+ this.payload = request.payload;
+ }
+
+ public BenchmarkMessageResponse(long id, byte[] payload) {
+ this.id = id;
+ this.payload = payload;
+ }
+
+ public BenchmarkMessageResponse() {
+ }
+
+ public long id() {
+ return id;
+ }
+
+ public byte[] payload() {
+ return payload;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readLong();
+ payload = new byte[in.readVInt()];
+ in.readFully(payload);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(id);
+ out.writeVInt(payload.length);
+ out.writeBytes(payload);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java
new file mode 100644
index 0000000..84bf8d9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.netty.NettyTransport;
+
+import java.util.concurrent.CountDownLatch;
+
+import static org.elasticsearch.transport.TransportRequestOptions.options;
+
+/**
+ * Benchmarks the netty transport under concurrent large (BULK) payloads, with
+ * a separate small (STATE) request measuring latency alongside.
+ */
+public class BenchmarkNettyLargeMessages {
+
+ public static void main(String[] args) throws InterruptedException {
+ final ByteSizeValue payloadSize = new ByteSizeValue(10, ByteSizeUnit.MB);
+ final int NUMBER_OF_ITERATIONS = 100000;
+ final int NUMBER_OF_CLIENTS = 5;
+ final byte[] payload = new byte[(int) payloadSize.bytes()];
+
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .build();
+
+ NetworkService networkService = new NetworkService(settings);
+
+ final ThreadPool threadPool = new ThreadPool();
+ final TransportService transportServiceServer = new TransportService(new NettyTransport(settings, threadPool, networkService, Version.CURRENT), threadPool).start();
+ final TransportService transportServiceClient = new TransportService(new NettyTransport(settings, threadPool, networkService, Version.CURRENT), threadPool).start();
+
+ final DiscoveryNode bigNode = new DiscoveryNode("big", new InetSocketTransportAddress("localhost", 9300), Version.CURRENT);
+// final DiscoveryNode smallNode = new DiscoveryNode("small", new InetSocketTransportAddress("localhost", 9300));
+ final DiscoveryNode smallNode = bigNode;
+
+ transportServiceClient.connectToNode(bigNode);
+ transportServiceClient.connectToNode(smallNode);
+
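+ // server side: an echo-style handler that answers every request with a response carrying the same payload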
+ transportServiceServer.registerHandler("benchmark", new BaseTransportRequestHandler<BenchmarkMessageRequest>() {
+ @Override
+ public BenchmarkMessageRequest newInstance() {
+ return new BenchmarkMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception {
+ channel.sendResponse(new BenchmarkMessageResponse(request));
+ }
+ });
+
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_CLIENTS);
+ for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
+ new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ BenchmarkMessageRequest message = new BenchmarkMessageRequest(1, payload);
+ transportServiceClient.submitRequest(bigNode, "benchmark", message, options().withType(TransportRequestOptions.Type.BULK), new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
+ @Override
+ public BenchmarkMessageResponse newInstance() {
+ return new BenchmarkMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(BenchmarkMessageResponse response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ }
+ }).txGet();
+ }
+ latch.countDown();
+ }
+ }).start();
+ }
+
+ new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int i = 0; i < 1; i++) {
+ BenchmarkMessageRequest message = new BenchmarkMessageRequest(2, BytesRef.EMPTY_BYTES);
+ long start = System.currentTimeMillis();
+ transportServiceClient.submitRequest(smallNode, "benchmark", message, options().withType(TransportRequestOptions.Type.STATE), new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
+ @Override
+ public BenchmarkMessageResponse newInstance() {
+ return new BenchmarkMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(BenchmarkMessageResponse response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ }
+ }).txGet();
+ long took = System.currentTimeMillis() - start;
+ System.out.println("Took " + took + "ms");
+ }
+ }
+ }).start();
+
+ latch.await();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java b/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java
new file mode 100644
index 0000000..3c206ef
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.local.LocalTransport;
+import org.elasticsearch.transport.netty.NettyTransport;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Benchmarks request/response round-trips over the local or netty transport.
+ */
+public class TransportBenchmark {
+
+ enum Type {
+ LOCAL {
+ @Override
+ public Transport newTransport(Settings settings, ThreadPool threadPool) {
+ return new LocalTransport(settings, threadPool, Version.CURRENT);
+ }
+ },
+ NETTY {
+ @Override
+ public Transport newTransport(Settings settings, ThreadPool threadPool) {
+ return new NettyTransport(settings, threadPool, new NetworkService(ImmutableSettings.EMPTY), Version.CURRENT);
+ }
+ };
+
+ public abstract Transport newTransport(Settings settings, ThreadPool threadPool);
+ }
+
+ public static void main(String[] args) {
+ final String executor = ThreadPool.Names.GENERIC;
+ final boolean waitForRequest = true;
+ final ByteSizeValue payloadSize = new ByteSizeValue(100, ByteSizeUnit.BYTES);
+ final int NUMBER_OF_CLIENTS = 10;
+ final int NUMBER_OF_ITERATIONS = 100000;
+ final byte[] payload = new byte[(int) payloadSize.bytes()];
+ final AtomicLong idGenerator = new AtomicLong();
+ final Type type = Type.NETTY;
+
+
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .build();
+
+ final ThreadPool serverThreadPool = new ThreadPool();
+ final TransportService serverTransportService = new TransportService(type.newTransport(settings, serverThreadPool), serverThreadPool).start();
+
+ final ThreadPool clientThreadPool = new ThreadPool();
+ final TransportService clientTransportService = new TransportService(type.newTransport(settings, clientThreadPool), clientThreadPool).start();
+
+ final DiscoveryNode node = new DiscoveryNode("server", serverTransportService.boundAddress().publishAddress(), Version.CURRENT);
+
+ serverTransportService.registerHandler("benchmark", new BaseTransportRequestHandler<BenchmarkMessageRequest>() {
+ @Override
+ public BenchmarkMessageRequest newInstance() {
+ return new BenchmarkMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return executor;
+ }
+
+ @Override
+ public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception {
+ channel.sendResponse(new BenchmarkMessageResponse(request));
+ }
+ });
+
+ clientTransportService.connectToNode(node);
+
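+ // warm up with 10k synchronous round-trips before starting the measured clients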
+ for (int i = 0; i < 10000; i++) {
+ BenchmarkMessageRequest message = new BenchmarkMessageRequest(1, payload);
+ clientTransportService.submitRequest(node, "benchmark", message, new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
+ @Override
+ public BenchmarkMessageResponse newInstance() {
+ return new BenchmarkMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(BenchmarkMessageResponse response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ }
+ }).txGet();
+ }
+
+
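+ // measured run: NUMBER_OF_CLIENTS threads each issue NUMBER_OF_ITERATIONS round-trips (synchronous when waitForRequest is set)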
+ Thread[] clients = new Thread[NUMBER_OF_CLIENTS];
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS);
+ for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
+ clients[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int j = 0; j < NUMBER_OF_ITERATIONS; j++) {
+ final long id = idGenerator.incrementAndGet();
+ BenchmarkMessageRequest request = new BenchmarkMessageRequest(id, payload);
+ BaseTransportResponseHandler<BenchmarkMessageResponse> handler = new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
+ @Override
+ public BenchmarkMessageResponse newInstance() {
+ return new BenchmarkMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return executor;
+ }
+
+ @Override
+ public void handleResponse(BenchmarkMessageResponse response) {
+ if (response.id() != id) {
+ System.out.println("NO ID MATCH [" + response.id() + "] and [" + id + "]");
+ }
+ latch.countDown();
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ latch.countDown();
+ }
+ };
+
+ if (waitForRequest) {
+ clientTransportService.submitRequest(node, "benchmark", request, handler).txGet();
+ } else {
+ clientTransportService.sendRequest(node, "benchmark", request, handler);
+ }
+ }
+ }
+ });
+ }
+
+ StopWatch stopWatch = new StopWatch().start();
+ for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
+ clients[i].start();
+ }
+
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ stopWatch.stop();
+
+ System.out.println("Ran [" + NUMBER_OF_CLIENTS + "], each with [" + NUMBER_OF_ITERATIONS + "] iterations, payload [" + payloadSize + "]: took [" + stopWatch.totalTime() + "], TPS: " + (NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS) / stopWatch.totalTime().secondsFrac());
+
+ clientTransportService.close();
+ clientThreadPool.shutdownNow();
+
+ serverTransportService.close();
+ serverThreadPool.shutdownNow();
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/benchmark/transport/netty/NettyEchoBenchmark.java b/src/test/java/org/elasticsearch/benchmark/transport/netty/NettyEchoBenchmark.java
new file mode 100644
index 0000000..61686eb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/transport/netty/NettyEchoBenchmark.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport.netty;
+
+import org.jboss.netty.bootstrap.ClientBootstrap;
+import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.*;
+import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
+import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+
+import java.net.InetSocketAddress;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executors;
+
+public class NettyEchoBenchmark {
+
+ public static void main(String[] args) {
+ final int payloadSize = 100;
+ int CYCLE_SIZE = 50000;
+ final long NUMBER_OF_ITERATIONS = 500000;
+
+ ChannelBuffer message = ChannelBuffers.buffer(payloadSize);
+ for (int i = 0; i < message.capacity(); i++) {
+ message.writeByte((byte) i);
+ }
+
+ // Configure the server.
+ ServerBootstrap serverBootstrap = new ServerBootstrap(
+ new NioServerSocketChannelFactory(
+ Executors.newCachedThreadPool(),
+ Executors.newCachedThreadPool()));
+
+ // Set up the pipeline factory.
+ serverBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ return Channels.pipeline(new EchoServerHandler());
+ }
+ });
+
+ // Bind and start to accept incoming connections.
+ serverBootstrap.bind(new InetSocketAddress(9000));
+
+ ClientBootstrap clientBootstrap = new ClientBootstrap(
+ new NioClientSocketChannelFactory(
+ Executors.newCachedThreadPool(),
+ Executors.newCachedThreadPool()));
+
+// ClientBootstrap clientBootstrap = new ClientBootstrap(
+// new OioClientSocketChannelFactory(Executors.newCachedThreadPool()));
+
+ // Set up the pipeline factory.
+ final EchoClientHandler clientHandler = new EchoClientHandler();
+ clientBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ return Channels.pipeline(clientHandler);
+ }
+ });
+
+ // Start the connection attempt.
+ ChannelFuture future = clientBootstrap.connect(new InetSocketAddress("localhost", 9000));
+ future.awaitUninterruptibly();
+ Channel clientChannel = future.getChannel();
+
+ System.out.println("Warming up...");
+ for (long i = 0; i < 10000; i++) {
+ clientHandler.latch = new CountDownLatch(1);
+ clientChannel.write(message);
+ try {
+ clientHandler.latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ System.out.println("Warmed up");
+
+
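+ // measured run: synchronous echo round-trips, reporting throughput every CYCLE_SIZE iterations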
+ long start = System.currentTimeMillis();
+ long cycleStart = System.currentTimeMillis();
+ for (long i = 1; i < NUMBER_OF_ITERATIONS; i++) {
+ clientHandler.latch = new CountDownLatch(1);
+ clientChannel.write(message);
+ try {
+ clientHandler.latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ if ((i % CYCLE_SIZE) == 0) {
+ long cycleEnd = System.currentTimeMillis();
+ System.out.println("Ran 50000, TPS " + (CYCLE_SIZE / ((double) (cycleEnd - cycleStart) / 1000)));
+ cycleStart = cycleEnd;
+ }
+ }
+ long end = System.currentTimeMillis();
+ long seconds = (end - start) / 1000;
+ System.out.println("Ran [" + NUMBER_OF_ITERATIONS + "] iterations, payload [" + payloadSize + "]: took [" + seconds + "], TPS: " + ((double) NUMBER_OF_ITERATIONS) / seconds);
+
+ clientChannel.close().awaitUninterruptibly();
+ clientBootstrap.releaseExternalResources();
+ serverBootstrap.releaseExternalResources();
+ }
+
+ public static class EchoClientHandler extends SimpleChannelUpstreamHandler {
+
+ public volatile CountDownLatch latch;
+
+ public EchoClientHandler() {
+ }
+
+ @Override
+ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
+ latch.countDown();
+ }
+
+ @Override
+ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
+ e.getCause().printStackTrace();
+ e.getChannel().close();
+ }
+ }
+
+
+ public static class EchoServerHandler extends SimpleChannelUpstreamHandler {
+
+ @Override
+ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
+ e.getChannel().write(e.getMessage());
+ }
+
+ @Override
+ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
+ // Close the connection when an exception is raised.
+ e.getCause().printStackTrace();
+ e.getChannel().close();
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java b/src/test/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java
new file mode 100644
index 0000000..d9995e1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.uuid;
+
+import org.elasticsearch.common.StopWatch;
+
+import java.util.UUID;
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * Micro-benchmark for java.util.UUID.randomUUID() throughput, single-threaded
+ * and multi-threaded.
+ */
+public class SimpleUuidBenchmark {
+
+ private static long NUMBER_OF_ITERATIONS = 10000;
+ private static int NUMBER_OF_THREADS = 100;
+
+ public static void main(String[] args) throws Exception {
+ StopWatch stopWatch = new StopWatch().start();
+ System.out.println("Running " + NUMBER_OF_ITERATIONS);
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ UUID.randomUUID().toString();
+ }
+ System.out.println("Generated in " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));
+
+ System.out.println("Generating using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ UUID.randomUUID().toString();
+ }
+ latch.countDown();
+ }
+ });
+ }
+ stopWatch = new StopWatch().start();
+ for (Thread thread : threads) {
+ thread.start();
+ }
+ latch.await();
+ stopWatch.stop();
+ System.out.println("Generate in " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java b/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java
new file mode 100644
index 0000000..3e4bea1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.blocks;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.notNullValue;
+
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class SimpleBlocksTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void verifyIndexAndClusterReadOnly() throws Exception {
+ // cluster.read_only = null: write and metadata not blocked
+ canCreateIndex("test1");
+ canIndexDocument("test1");
+ setIndexReadOnly("test1", "false");
+ canIndexExists("test1");
+
+ // cluster.read_only = true: block write and metadata
+ setClusterReadOnly("true");
+ canNotCreateIndex("test2");
+ // even if index has index.read_only = false
+ canNotIndexDocument("test1");
+ canNotIndexExists("test1");
+
+ // cluster.read_only = false: removes the block
+ setClusterReadOnly("false");
+ canCreateIndex("test2");
+ canIndexDocument("test2");
+ canIndexDocument("test1");
+ canIndexExists("test1");
+
+
+ // a newly created index has no blocks
+ canCreateIndex("ro");
+ canIndexDocument("ro");
+ canIndexExists("ro");
+
+ // adds index write and metadata block
+ setIndexReadOnly( "ro", "true");
+ canNotIndexDocument("ro");
+ canNotIndexExists("ro");
+
+ // other indices not blocked
+ canCreateIndex("rw");
+ canIndexDocument("rw");
+ canIndexExists("rw");
+
+ // blocks can be removed
+ setIndexReadOnly("ro", "false");
+ canIndexDocument("ro");
+ canIndexExists("ro");
+ }
+
+ @Test
+ public void testIndexReadWriteMetaDataBlocks() {
+ canCreateIndex("test1");
+ canIndexDocument("test1");
+ client().admin().indices().prepareUpdateSettings("test1")
+ .setSettings(settingsBuilder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true))
+ .execute().actionGet();
+ canNotIndexDocument("test1");
+ client().admin().indices().prepareUpdateSettings("test1")
+ .setSettings(settingsBuilder().put(IndexMetaData.SETTING_BLOCKS_WRITE, false))
+ .execute().actionGet();
+ canIndexDocument("test1");
+ }
+
+ private void canCreateIndex(String index) {
+ try {
+ CreateIndexResponse r = client().admin().indices().prepareCreate(index).execute().actionGet();
+ assertThat(r, notNullValue());
+ } catch (ClusterBlockException e) {
+ fail();
+ }
+ }
+
+ private void canNotCreateIndex(String index) {
+ try {
+ client().admin().indices().prepareCreate(index).execute().actionGet();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+ }
+
+ private void canIndexDocument(String index) {
+ try {
+ IndexRequestBuilder builder = client().prepareIndex(index, "zzz");
+ builder.setSource("foo", "bar");
+ IndexResponse r = builder.execute().actionGet();
+ assertThat(r, notNullValue());
+ } catch (ClusterBlockException e) {
+ fail();
+ }
+ }
+
+ private void canNotIndexDocument(String index) {
+ try {
+ IndexRequestBuilder builder = client().prepareIndex(index, "zzz");
+ builder.setSource("foo", "bar");
+ builder.execute().actionGet();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+ }
+
+ private void canIndexExists(String index) {
+ try {
+ IndicesExistsResponse r = client().admin().indices().prepareExists(index).execute().actionGet();
+ assertThat(r, notNullValue());
+ } catch (ClusterBlockException e) {
+ fail();
+ }
+ }
+
+ private void canNotIndexExists(String index) {
+ try {
+ IndicesExistsResponse r = client().admin().indices().prepareExists(index).execute().actionGet();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+ }
+
+ private void setClusterReadOnly(String value) {
+ Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, value).build();
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet();
+ }
+
+ private void setIndexReadOnly(String index, Object value) {
+ HashMap<String, Object> newSettings = new HashMap<String, Object>();
+ newSettings.put(IndexMetaData.SETTING_READ_ONLY, value);
+
+ UpdateSettingsRequestBuilder settingsRequest = client().admin().indices().prepareUpdateSettings(index);
+ settingsRequest.setSettings(newSettings);
+ UpdateSettingsResponse settingsResponse = settingsRequest.execute().actionGet();
+ assertThat(settingsResponse, notNullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/broadcast/BroadcastActionsTests.java b/src/test/java/org/elasticsearch/broadcast/BroadcastActionsTests.java
new file mode 100644
index 0000000..4d841c1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/broadcast/BroadcastActionsTests.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.broadcast;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Integration tests for broadcast operations (count), covering the different
+ * operation threading modes and shard failure reporting.
+ */
+public class BroadcastActionsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBroadcastOperations() throws IOException {
+ prepareCreate("test", 1).execute().actionGet(5000);
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ flush();
+ client().index(indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet();
+ refresh();
+
+ logger.info("Count");
+ // check count
+ for (int i = 0; i < 5; i++) {
+ // test successful
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(termQuery("_type", "type1"))
+ .setOperationThreading(BroadcastOperationThreading.NO_THREADS).get();
+ assertThat(countResponse.getCount(), equalTo(2L));
+ assertThat(countResponse.getTotalShards(), equalTo(5));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+ }
+
+ for (int i = 0; i < 5; i++) {
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(termQuery("_type", "type1"))
+ .setOperationThreading(BroadcastOperationThreading.SINGLE_THREAD).get();
+ assertThat(countResponse.getCount(), equalTo(2L));
+ assertThat(countResponse.getTotalShards(), equalTo(5));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+ }
+
+ for (int i = 0; i < 5; i++) {
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(termQuery("_type", "type1"))
+ .setOperationThreading(BroadcastOperationThreading.THREAD_PER_SHARD).get();
+ assertThat(countResponse.getCount(), equalTo(2L));
+ assertThat(countResponse.getTotalShards(), equalTo(5));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+ }
+
+ for (int i = 0; i < 5; i++) {
+ // test failure (simply a query that can't be parsed)
+ CountResponse countResponse = client().count(countRequest("test").source("{ term : { _type : \"type1 } }".getBytes(Charsets.UTF_8))).actionGet();
+
+ assertThat(countResponse.getCount(), equalTo(0l));
+ assertThat(countResponse.getTotalShards(), equalTo(5));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(countResponse.getFailedShards(), equalTo(5));
+ for (ShardOperationFailedException exp : countResponse.getShardFailures()) {
+ assertThat(exp.reason(), containsString("QueryParsingException"));
+ }
+ }
+
+ }
+
+ private XContentBuilder source(String id, String nameValue) throws IOException {
+ return XContentFactory.jsonBuilder().startObject().field("id", id).field("name", nameValue).endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java b/src/test/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java
new file mode 100644
index 0000000..082be6d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cache.recycler;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.recycler.Recycler.V;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.TestCluster;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.lang.reflect.Array;
+import java.util.Random;
+import java.util.concurrent.ConcurrentMap;
+
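+/**
+ * A test {@link PageCacheRecycler} that tracks every acquired page and fails the test run if a page
+ * is leaked, released twice, or released from a different thread than it was acquired on. Pages that
+ * are handed out uncleared are additionally filled with random data to surface reads of stale memory.
+ */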
+public class MockPageCacheRecycler extends PageCacheRecycler {
+
+ private static final ConcurrentMap<Object, Throwable> ACQUIRED_PAGES = Maps.newConcurrentMap();
+
+ public static void ensureAllPagesAreReleased() {
+ if (ACQUIRED_PAGES.size() > 0) {
+ final Throwable t = ACQUIRED_PAGES.entrySet().iterator().next().getValue();
+ throw new RuntimeException(ACQUIRED_PAGES.size() + " pages have not been released", t);
+ }
+ ACQUIRED_PAGES.clear();
+ }
+
+ private final Random random;
+
+ @Inject
+ public MockPageCacheRecycler(Settings settings, ThreadPool threadPool) {
+ super(settings, threadPool);
+ final long seed = settings.getAsLong(TestCluster.SETTING_CLUSTER_NODE_SEED, 0L);
+ random = new Random(seed);
+ }
+
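+    // wraps a page so that release() can enforce the acquire/release invariants checked by this class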
+ private <T> V<T> wrap(final V<T> v) {
+ ACQUIRED_PAGES.put(v, new Throwable());
+ final Thread t = Thread.currentThread();
+ return new V<T>() {
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ if (t != Thread.currentThread()) {
+                    // Releasing from a different thread doesn't break anything, but it is bad practice:
+                    // pages should be acquired as late as possible and released as soon as possible, in a try/finally fashion
+                    throw new RuntimeException("Page was allocated in " + t + " but released in " + Thread.currentThread());
+                }
+                // use a distinct name here so it does not shadow the acquisition thread 't' captured above
+                final Throwable acquired = ACQUIRED_PAGES.remove(v);
+                if (acquired == null) {
+                    throw new IllegalStateException("Releasing a page that has not been acquired");
+                }
+ final T ref = v();
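+                // poison the page on release: null out object slots, randomize primitive slots,
+                // so any later use of the released page fails loudly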
+ for (int i = 0; i < Array.getLength(ref); ++i) {
+ if (ref instanceof Object[]) {
+ Array.set(ref, i, null);
+ } else {
+ Array.set(ref, i, (byte) random.nextInt(256));
+ }
+ }
+ return v.release();
+ }
+
+ @Override
+ public T v() {
+ return v.v();
+ }
+
+ @Override
+ public boolean isRecycled() {
+ return v.isRecycled();
+ }
+
+ };
+ }
+
+ @Override
+ public V<byte[]> bytePage(boolean clear) {
+ final V<byte[]> page = super.bytePage(clear);
+ if (!clear) {
+ random.nextBytes(page.v());
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<int[]> intPage(boolean clear) {
+ final V<int[]> page = super.intPage(clear);
+ if (!clear) {
+ for (int i = 0; i < page.v().length; ++i) {
+ page.v()[i] = random.nextInt();
+ }
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<long[]> longPage(boolean clear) {
+ final V<long[]> page = super.longPage(clear);
+ if (!clear) {
+ for (int i = 0; i < page.v().length; ++i) {
+ page.v()[i] = random.nextLong();
+ }
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<double[]> doublePage(boolean clear) {
+ final V<double[]> page = super.doublePage(clear);
+ if (!clear) {
+ for (int i = 0; i < page.v().length; ++i) {
+ page.v()[i] = random.nextDouble() - 0.5;
+ }
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<Object[]> objectPage() {
+ return wrap(super.objectPage());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java b/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java
new file mode 100644
index 0000000..087b7d7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numNodes = 0, transportClientRatio = 1.0)
+public class TransportClientTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testPickingUpChangesInDiscoveryNode() {
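+        // a single master-only node (node.data=false); the transport client must reflect that
+        // attribute on the DiscoveryNode it is connected to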
+ String nodeName = cluster().startNode(ImmutableSettings.builder().put("node.data", false));
+
+ TransportClient client = (TransportClient) cluster().client(nodeName);
+ assertThat(client.connectedNodes().get(0).dataNode(), Matchers.equalTo(false));
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java b/src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java
new file mode 100644
index 0000000..06d4569
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Scoped as TEST, because if the test with the cluster read-only block fails, all other tests would fail as well, since the block is not cleaned up properly
+ */
+@ClusterScope(scope=Scope.TEST)
+public class BlockClusterStatsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBlocks() throws Exception {
+ createIndex("foo");
+ ClusterUpdateSettingsResponse updateSettingsResponse = client().admin().cluster().prepareUpdateSettings().setTransientSettings(
+ ImmutableSettings.settingsBuilder().put("cluster.blocks.read_only", true).build()).get();
+ assertThat(updateSettingsResponse.isAcknowledged(), is(true));
+ UpdateSettingsResponse indexSettingsResponse = client().admin().indices().prepareUpdateSettings("foo").setSettings(
+ ImmutableSettings.settingsBuilder().put("index.blocks.read_only", true)).get();
+ assertThat(indexSettingsResponse.isAcknowledged(), is(true));
+
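+        // blocks are only part of the cluster state response when explicitly requested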
+ ClusterStateResponse clusterStateResponseUnfiltered = client().admin().cluster().prepareState().clear().setBlocks(true).get();
+ assertThat(clusterStateResponseUnfiltered.getState().blocks().global(), hasSize(1));
+ assertThat(clusterStateResponseUnfiltered.getState().blocks().indices().size(), is(1));
+
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().get();
+ assertThat(clusterStateResponse.getState().blocks().global(), hasSize(0));
+ assertThat(clusterStateResponse.getState().blocks().indices().size(), is(0));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java b/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java
new file mode 100644
index 0000000..4212eba
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth;
+import org.elasticsearch.action.admin.cluster.health.ClusterShardHealth;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.empty;
+
+public class ClusterHealthResponsesTests extends ElasticsearchTestCase {
+
+ private void assertIndexHealth(ClusterIndexHealth indexHealth, ShardCounter counter, IndexMetaData indexMetaData) {
+ assertThat(indexHealth.getStatus(), equalTo(counter.status()));
+ assertThat(indexHealth.getNumberOfShards(), equalTo(indexMetaData.getNumberOfShards()));
+ assertThat(indexHealth.getNumberOfReplicas(), equalTo(indexMetaData.getNumberOfReplicas()));
+ assertThat(indexHealth.getActiveShards(), equalTo(counter.active));
+ assertThat(indexHealth.getRelocatingShards(), equalTo(counter.relocating));
+ assertThat(indexHealth.getInitializingShards(), equalTo(counter.initializing));
+ assertThat(indexHealth.getUnassignedShards(), equalTo(counter.unassigned));
+ assertThat(indexHealth.getShards().size(), equalTo(indexMetaData.getNumberOfShards()));
+ assertThat(indexHealth.getValidationFailures(), empty());
+ int totalShards = 0;
+ for (ClusterShardHealth shardHealth : indexHealth.getShards().values()) {
+ totalShards += shardHealth.getActiveShards() + shardHealth.getInitializingShards() + shardHealth.getUnassignedShards();
+ }
+
+ assertThat(totalShards, equalTo(indexMetaData.getNumberOfShards() * (1 + indexMetaData.getNumberOfReplicas())));
+ }
+
+ protected class ShardCounter {
+ public int active;
+ public int relocating;
+ public int initializing;
+ public int unassigned;
+ public int primaryActive;
+ public int primaryInactive;
+
+ public ClusterHealthStatus status() {
+ if (primaryInactive > 0) {
+ return ClusterHealthStatus.RED;
+ }
+ if (unassigned > 0 || initializing > 0) {
+ return ClusterHealthStatus.YELLOW;
+ }
+ return ClusterHealthStatus.GREEN;
+ }
+
+ public void update(ShardRouting shardRouting) {
+ if (shardRouting.active()) {
+ active++;
+ if (shardRouting.primary()) {
+ primaryActive++;
+ }
+ if (shardRouting.relocating()) {
+ relocating++;
+ }
+ return;
+ }
+
+ if (shardRouting.primary()) {
+ primaryInactive++;
+ }
+ if (shardRouting.initializing()) {
+ initializing++;
+ } else {
+ unassigned++;
+ }
+ }
+ }
+
+ static int node_id = 1;
+
+ private ImmutableShardRouting genShardRouting(String index, int shardId, boolean primary) {
+
+ ShardRoutingState state;
+
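+        // pick a random routing state, heavily biased towards STARTED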
+ int i = randomInt(40);
+ if (i > 5) {
+ state = ShardRoutingState.STARTED;
+ } else if (i > 3) {
+ state = ShardRoutingState.RELOCATING;
+ } else if (i > 1) {
+ state = ShardRoutingState.INITIALIZING;
+ } else {
+ state = ShardRoutingState.UNASSIGNED;
+ }
+
+ switch (state) {
+ case UNASSIGNED:
+ return new MutableShardRouting(index, shardId, null, primary, ShardRoutingState.UNASSIGNED, 1);
+ case STARTED:
+ return new MutableShardRouting(index, shardId, "node_" + Integer.toString(node_id++), primary, ShardRoutingState.STARTED, 1);
+ case INITIALIZING:
+ return new MutableShardRouting(index, shardId, "node_" + Integer.toString(node_id++), primary, ShardRoutingState.INITIALIZING, 1);
+ case RELOCATING:
+ return new MutableShardRouting(index, shardId, "node_" + Integer.toString(node_id++), "node_" + Integer.toString(node_id++), primary, ShardRoutingState.RELOCATING, 1);
+ default:
+ throw new ElasticsearchException("Unknown state: " + state.name());
+ }
+
+ }
+
+ private IndexShardRoutingTable genShardRoutingTable(String index, int shardId, int replicas, ShardCounter counter) {
+ IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(new ShardId(index, shardId), true);
+ ImmutableShardRouting shardRouting = genShardRouting(index, shardId, true);
+ counter.update(shardRouting);
+ builder.addShard(shardRouting);
+ for (; replicas > 0; replicas--) {
+ shardRouting = genShardRouting(index, shardId, false);
+ counter.update(shardRouting);
+ builder.addShard(shardRouting);
+ }
+
+ return builder.build();
+ }
+
+ IndexRoutingTable genIndexRoutingTable(IndexMetaData indexMetaData, ShardCounter counter) {
+ IndexRoutingTable.Builder builder = IndexRoutingTable.builder(indexMetaData.index());
+ for (int shard = 0; shard < indexMetaData.numberOfShards(); shard++) {
+ builder.addIndexShard(genShardRoutingTable(indexMetaData.index(), shard, indexMetaData.getNumberOfReplicas(), counter));
+ }
+ return builder.build();
+ }
+
+ @Test
+ public void testClusterIndexHealth() {
+ int numberOfShards = randomInt(3) + 1;
+ int numberOfReplicas = randomInt(4);
+ IndexMetaData indexMetaData = IndexMetaData.builder("test1").numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas).build();
+ ShardCounter counter = new ShardCounter();
+ IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
+
+ ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
+ logger.info("index status: {}, expected {}", indexHealth.getStatus(), counter.status());
+ assertIndexHealth(indexHealth, counter, indexMetaData);
+ }
+
+ private void assertClusterHealth(ClusterHealthResponse clusterHealth, ShardCounter counter) {
+ assertThat(clusterHealth.getStatus(), equalTo(counter.status()));
+ assertThat(clusterHealth.getActiveShards(), equalTo(counter.active));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(counter.primaryActive));
+ assertThat(clusterHealth.getInitializingShards(), equalTo(counter.initializing));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(counter.relocating));
+ assertThat(clusterHealth.getUnassignedShards(), equalTo(counter.unassigned));
+ assertThat(clusterHealth.getValidationFailures(), empty());
+ }
+
+ @Test
+ public void testClusterHealth() {
+ ShardCounter counter = new ShardCounter();
+ RoutingTable.Builder routingTable = RoutingTable.builder();
+ MetaData.Builder metaData = MetaData.builder();
+ for (int i = randomInt(4); i >= 0; i--) {
+ int numberOfShards = randomInt(3) + 1;
+ int numberOfReplicas = randomInt(4);
+ IndexMetaData indexMetaData = IndexMetaData.builder("test_" + Integer.toString(i)).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas).build();
+ IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
+ metaData.put(indexMetaData, true);
+ routingTable.add(indexRoutingTable);
+ }
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", clusterState.metaData().concreteIndices(null), clusterState);
+ logger.info("cluster status: {}, expected {}", clusterHealth.getStatus(), counter.status());
+
+ assertClusterHealth(clusterHealth, counter);
+ }
+
+ @Test
+ public void testValidations() {
+ IndexMetaData indexMetaData = IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(2).build();
+ ShardCounter counter = new ShardCounter();
+ IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
+ indexMetaData = IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(3).build();
+
+ ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
+ assertThat(indexHealth.getValidationFailures(), Matchers.hasSize(2));
+
+ RoutingTable.Builder routingTable = RoutingTable.builder();
+ MetaData.Builder metaData = MetaData.builder();
+ metaData.put(indexMetaData, true);
+ routingTable.add(indexRoutingTable);
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", clusterState.metaData().concreteIndices(null), clusterState);
+ // currently we have no cluster level validation failures as index validation issues are reported per index.
+ assertThat(clusterHealth.getValidationFailures(), Matchers.hasSize(0));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/ClusterHealthTests.java b/src/test/java/org/elasticsearch/cluster/ClusterHealthTests.java
new file mode 100644
index 0000000..2dc906f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/ClusterHealthTests.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ClusterHealthTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testHealth() {
+        logger.info("--> running cluster health on an index that does not exist");
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test1").setWaitForYellowStatus().setTimeout("1s").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(true));
+ assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED));
+ assertThat(healthResponse.getIndices().isEmpty(), equalTo(true));
+
+ logger.info("--> running cluster wide health");
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setTimeout("10s").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(healthResponse.getIndices().isEmpty(), equalTo(true));
+
+ logger.info("--> Creating index test1 with zero replicas");
+ client().admin().indices().prepareCreate("test1")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_replicas", 0))
+ .execute().actionGet();
+
+        logger.info("--> running cluster health on an index that does exist");
+ healthResponse = client().admin().cluster().prepareHealth("test1").setWaitForYellowStatus().setTimeout("10s").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+        logger.info("--> running cluster health on an index that exists and one that doesn't");
+ healthResponse = client().admin().cluster().prepareHealth("test1", "test2").setWaitForYellowStatus().setTimeout("1s").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(true));
+ assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED));
+ assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(healthResponse.getIndices().size(), equalTo(1));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java b/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java
new file mode 100644
index 0000000..4789d6b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java
@@ -0,0 +1,658 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.PendingClusterTask;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Singleton;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.Test;
+
+import java.util.*;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests cluster state update task handling: queue timeouts, acknowledgements,
+ * pending task reporting and local master listeners.
+ */
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class ClusterServiceTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testTimeoutUpdateTask() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ cluster().startNode(settings);
+ ClusterService clusterService1 = cluster().getInstance(ClusterService.class);
+ final CountDownLatch block = new CountDownLatch(1);
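+        // occupy the single cluster state update thread so the next task has to wait in the queue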
+ clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ try {
+ block.await();
+ } catch (InterruptedException e) {
+ fail();
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ fail();
+ }
+ });
+
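+        // this task should time out in the queue before execute() is ever called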
+ final CountDownLatch timedOut = new CountDownLatch(1);
+ final AtomicBoolean executeCalled = new AtomicBoolean();
+ clusterService1.submitStateUpdateTask("test2", new TimeoutClusterStateUpdateTask() {
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueMillis(2);
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ timedOut.countDown();
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ executeCalled.set(true);
+ return currentState;
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ }
+ });
+
+ assertThat(timedOut.await(500, TimeUnit.MILLISECONDS), equalTo(true));
+ block.countDown();
+ Thread.sleep(100); // sleep a bit to double check that execute on the timed out update task is not called...
+ assertThat(executeCalled.get(), equalTo(false));
+ }
+
+ @Test
+ public void testAckedUpdateTask() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ cluster().startNode(settings);
+ ClusterService clusterService = cluster().getInstance(ClusterService.class);
+
+ final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
+ final AtomicBoolean ackTimeout = new AtomicBoolean(false);
+ final AtomicBoolean onFailure = new AtomicBoolean(false);
+ final AtomicBoolean executed = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+ final CountDownLatch processedLatch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask() {
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ allNodesAcked.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public void onAckTimeout() {
+ ackTimeout.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ processedLatch.countDown();
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ executed.set(true);
+ return ClusterState.builder(currentState).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("failed to execute callback in test {}", t, source);
+ onFailure.set(true);
+ latch.countDown();
+ }
+ });
+
+ assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+
+ assertThat(allNodesAcked.get(), equalTo(true));
+ assertThat(ackTimeout.get(), equalTo(false));
+ assertThat(executed.get(), equalTo(true));
+ assertThat(onFailure.get(), equalTo(false));
+
+ assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ @Test
+ public void testAckedUpdateTaskSameClusterState() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ cluster().startNode(settings);
+ ClusterService clusterService = cluster().getInstance(ClusterService.class);
+
+ final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
+ final AtomicBoolean ackTimeout = new AtomicBoolean(false);
+ final AtomicBoolean onFailure = new AtomicBoolean(false);
+ final AtomicBoolean executed = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+ final CountDownLatch processedLatch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask() {
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ allNodesAcked.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public void onAckTimeout() {
+ ackTimeout.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ processedLatch.countDown();
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ executed.set(true);
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("failed to execute callback in test {}", t, source);
+ onFailure.set(true);
+ latch.countDown();
+ }
+ });
+
+ assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+
+ assertThat(allNodesAcked.get(), equalTo(true));
+ assertThat(ackTimeout.get(), equalTo(false));
+ assertThat(executed.get(), equalTo(true));
+ assertThat(onFailure.get(), equalTo(false));
+
+ assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ @Test
+ public void testAckedUpdateTaskNoAckExpected() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ cluster().startNode(settings);
+ ClusterService clusterService = cluster().getInstance(ClusterService.class);
+
+ final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
+ final AtomicBoolean ackTimeout = new AtomicBoolean(false);
+ final AtomicBoolean onFailure = new AtomicBoolean(false);
+ final AtomicBoolean executed = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask() {
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return false;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ allNodesAcked.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public void onAckTimeout() {
+ ackTimeout.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ executed.set(true);
+ return ClusterState.builder(currentState).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("failed to execute callback in test {}", t, source);
+ onFailure.set(true);
+ latch.countDown();
+ }
+ });
+
+ assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+
+ assertThat(allNodesAcked.get(), equalTo(true));
+ assertThat(ackTimeout.get(), equalTo(false));
+ assertThat(executed.get(), equalTo(true));
+ assertThat(onFailure.get(), equalTo(false));
+ }
+
+ @Test
+ public void testAckedUpdateTaskTimeoutZero() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ cluster().startNode(settings);
+ ClusterService clusterService = cluster().getInstance(ClusterService.class);
+
+ final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
+ final AtomicBoolean ackTimeout = new AtomicBoolean(false);
+ final AtomicBoolean onFailure = new AtomicBoolean(false);
+ final AtomicBoolean executed = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+ final CountDownLatch processedLatch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask() {
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return false;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ allNodesAcked.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public void onAckTimeout() {
+ ackTimeout.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return TimeValue.timeValueSeconds(0);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ processedLatch.countDown();
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ executed.set(true);
+ return ClusterState.builder(currentState).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("failed to execute callback in test {}", t, source);
+ onFailure.set(true);
+ latch.countDown();
+ }
+ });
+
+ assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+
+ assertThat(allNodesAcked.get(), equalTo(false));
+ assertThat(ackTimeout.get(), equalTo(true));
+ assertThat(executed.get(), equalTo(true));
+ assertThat(onFailure.get(), equalTo(false));
+
+ assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ @Test
+ public void testPendingUpdateTask() throws Exception {
+ Settings zenSettings = settingsBuilder()
+ .put("discovery.type", "zen").build();
+ String node_0 = cluster().startNode(zenSettings);
+ cluster().startNodeClient(zenSettings);
+
+ ClusterService clusterService = cluster().getInstance(ClusterService.class, node_0);
+ final CountDownLatch block1 = new CountDownLatch(1);
+ final CountDownLatch invoked1 = new CountDownLatch(1);
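+        // block the first task on the update thread so the nine tasks submitted next stay pending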
+ clusterService.submitStateUpdateTask("1", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ invoked1.countDown();
+ try {
+ block1.await();
+ } catch (InterruptedException e) {
+ fail();
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ invoked1.countDown();
+ fail();
+ }
+ });
+ invoked1.await();
+ final CountDownLatch invoked2 = new CountDownLatch(9);
+ for (int i = 2; i <= 10; i++) {
+ clusterService.submitStateUpdateTask(Integer.toString(i), new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ invoked2.countDown();
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ fail();
+ }
+ });
+ }
+
+ // The tasks can be re-ordered, so we need to check out-of-order
+ Set<String> controlSources = new HashSet<String>(Arrays.asList("2", "3", "4", "5", "6", "7", "8", "9", "10"));
+ List<PendingClusterTask> pendingClusterTasks = clusterService.pendingTasks();
+ assertThat(pendingClusterTasks.size(), equalTo(9));
+ for (PendingClusterTask task : pendingClusterTasks) {
+ assertTrue(controlSources.remove(task.source().string()));
+ }
+ assertTrue(controlSources.isEmpty());
+
+ controlSources = new HashSet<String>(Arrays.asList("2", "3", "4", "5", "6", "7", "8", "9", "10"));
+ PendingClusterTasksResponse response = cluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet();
+ assertThat(response.pendingTasks().size(), equalTo(9));
+ for (PendingClusterTask task : response) {
+ assertTrue(controlSources.remove(task.source().string()));
+ }
+ assertTrue(controlSources.isEmpty());
+ block1.countDown();
+ invoked2.await();
+
+ pendingClusterTasks = clusterService.pendingTasks();
+ assertThat(pendingClusterTasks, empty());
+ response = cluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet();
+ assertThat(response.pendingTasks(), empty());
+
+ final CountDownLatch block2 = new CountDownLatch(1);
+ final CountDownLatch invoked3 = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("1", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ invoked3.countDown();
+ try {
+ block2.await();
+ } catch (InterruptedException e) {
+ fail();
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ invoked3.countDown();
+ fail();
+ }
+ });
+ invoked3.await();
+
+ for (int i = 2; i <= 5; i++) {
+ clusterService.submitStateUpdateTask(Integer.toString(i), new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ fail();
+ }
+ });
+ }
+ Thread.sleep(100);
+
+ pendingClusterTasks = clusterService.pendingTasks();
+ assertThat(pendingClusterTasks.size(), equalTo(4));
+ controlSources = new HashSet<String>(Arrays.asList("2", "3", "4", "5"));
+ for (PendingClusterTask task : pendingClusterTasks) {
+ assertTrue(controlSources.remove(task.source().string()));
+ }
+ assertTrue(controlSources.isEmpty());
+
+ response = cluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet();
+ assertThat(response.pendingTasks().size(), equalTo(4));
+ controlSources = new HashSet<String>(Arrays.asList("2", "3", "4", "5"));
+ for (PendingClusterTask task : response) {
+ assertTrue(controlSources.remove(task.source().string()));
+            assertThat(task.getTimeInQueueInMillis(), greaterThan(0L));
+ }
+ assertTrue(controlSources.isEmpty());
+ block2.countDown();
+ }
+
+ @Test
+ public void testListenerCallbacks() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("discovery.zen.minimum_master_nodes", 1)
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .put("plugin.types", TestPlugin.class.getName())
+ .build();
+
+ cluster().startNode(settings);
+ ClusterService clusterService1 = cluster().getInstance(ClusterService.class);
+ MasterAwareService testService1 = cluster().getInstance(MasterAwareService.class);
+
+ // the first node should be a master as the minimum required is 1
+ assertThat(clusterService1.state().nodes().masterNode(), notNullValue());
+ assertThat(clusterService1.state().nodes().localNodeMaster(), is(true));
+ assertThat(testService1.master(), is(true));
+
+ String node_1 = cluster().startNode(settings);
+ final ClusterService clusterService2 = cluster().getInstance(ClusterService.class, node_1);
+ MasterAwareService testService2 = cluster().getInstance(MasterAwareService.class, node_1);
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+
+ // the second node should not be the master as node1 is already the master.
+ assertThat(clusterService2.state().nodes().localNodeMaster(), is(false));
+ assertThat(testService2.master(), is(false));
+
+ cluster().stopCurrentMasterNode();
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("1").execute().actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+
+ // now that node1 is closed, node2 should be elected as master
+ assertThat(clusterService2.state().nodes().localNodeMaster(), is(true));
+ assertThat(testService2.master(), is(true));
+
+ Settings newSettings = settingsBuilder()
+ .put("discovery.zen.minimum_master_nodes", 2)
+ .put("discovery.type", "zen")
+ .build();
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(newSettings).execute().actionGet();
+
+ // there should not be any master as the minimum number of required eligible masters is not met
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ return clusterService2.state().nodes().masterNode() == null;
+ }
+ });
+ assertThat(testService2.master(), is(false));
+
+ String node_2 = cluster().startNode(settings);
+ clusterService1 = cluster().getInstance(ClusterService.class, node_2);
+ testService1 = cluster().getInstance(MasterAwareService.class, node_2);
+
+        // make sure both nodes see each other, otherwise the master node below could be null if node 2 is master and node 1 didn't receive the updated cluster state...
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).setWaitForNodes("2").execute().actionGet().isTimedOut(), is(false));
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).setWaitForNodes("2").execute().actionGet().isTimedOut(), is(false));
+
+        // now that another master-eligible node has started, a new master should be elected
+ assertThat(clusterService1.state().nodes().masterNode(), is(notNullValue()));
+ if (node_2.equals(clusterService1.state().nodes().masterNode().name())) {
+ assertThat(testService1.master(), is(true));
+ assertThat(testService2.master(), is(false));
+ } else {
+ assertThat(testService1.master(), is(false));
+ assertThat(testService2.master(), is(true));
+ }
+
+ }
+
+ public static class TestPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test plugin";
+ }
+
+ @Override
+ public String description() {
+ return "test plugin";
+ }
+
+ @Override
+ public Collection<Class<? extends LifecycleComponent>> services() {
+ List<Class<? extends LifecycleComponent>> services = new ArrayList<Class<? extends LifecycleComponent>>(1);
+ services.add(MasterAwareService.class);
+ return services;
+ }
+ }
+
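+    // tracks master election changes on the local node via the LocalNodeMasterListener callbacks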
+ @Singleton
+ public static class MasterAwareService extends AbstractLifecycleComponent<MasterAwareService> implements LocalNodeMasterListener {
+
+ private final ClusterService clusterService;
+ private volatile boolean master;
+
+ @Inject
+ public MasterAwareService(Settings settings, ClusterService clusterService) {
+ super(settings);
+ clusterService.add(this);
+ this.clusterService = clusterService;
+ logger.info("initialized test service");
+ }
+
+ @Override
+ public void onMaster() {
+ logger.info("on master [" + clusterService.localNode() + "]");
+ master = true;
+ }
+
+ @Override
+ public void offMaster() {
+ logger.info("off master [" + clusterService.localNode() + "]");
+ master = false;
+ }
+
+ public boolean master() {
+ return master;
+ }
+
+ @Override
+ protected void doStart() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ }
+
+ @Override
+ protected void doClose() throws ElasticsearchException {
+ }
+
+ @Override
+ public String executorName() {
+ return ThreadPool.Names.SAME;
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java
new file mode 100644
index 0000000..443debf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class DiskUsageTests extends ElasticsearchTestCase {
+
+ @Test
+ public void diskUsageCalcTest() {
+ DiskUsage du = new DiskUsage("node1", 100, 40);
+ assertThat(du.getFreeDiskAsPercentage(), equalTo(40.0));
+ assertThat(du.getFreeBytes(), equalTo(40L));
+ assertThat(du.getUsedBytes(), equalTo(60L));
+ assertThat(du.getTotalBytes(), equalTo(100L));
+
+ }
+
+ @Test
+ public void randomDiskUsageTest() {
+ int iters = atLeast(1000);
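+        // invalid combinations (free > total, or a non-positive total) must be rejected with an IllegalStateException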
+ for (int i = 1; i < iters; i++) {
+ long total = between(Integer.MIN_VALUE, Integer.MAX_VALUE);
+ long free = between(Integer.MIN_VALUE, Integer.MAX_VALUE);
+ if (free > total || total <= 0) {
+ try {
+ new DiskUsage("random", total, free);
+ fail("should never reach this");
+ } catch (IllegalStateException e) {
+ }
+ } else {
+ DiskUsage du = new DiskUsage("random", total, free);
+ assertThat(du.getFreeBytes(), equalTo(free));
+ assertThat(du.getTotalBytes(), equalTo(total));
+ assertThat(du.getUsedBytes(), equalTo(total - free));
+ assertThat(du.getFreeDiskAsPercentage(), equalTo(100.0 * ((double)free / total)));
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java b/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java
new file mode 100644
index 0000000..9747704
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java
@@ -0,0 +1,307 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = Scope.TEST, numNodes=0)
+public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleMinimumMasterNodes() throws Exception {
+
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("discovery.zen.minimum_master_nodes", 2)
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .put("gateway.type", "local")
+ .put("index.number_of_shards", 1)
+ .build();
+
+ logger.info("--> start first node");
+ cluster().startNode(settings);
+
+ logger.info("--> should be blocked, no master...");
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(true));
+
+ logger.info("--> start second node, cluster should be formed");
+ cluster().startNode(settings);
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(2));
+ assertThat(state.metaData().indices().containsKey("test"), equalTo(false));
+
+ createIndex("test");
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ // make sure that all shards recovered before trying to flush
+ assertThat(client().admin().cluster().prepareHealth("test").setWaitForActiveShards(2).execute().actionGet().getActiveShards(), equalTo(2));
+ // flush for simpler debugging
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+        logger.info("--> verify we got the data back");
+        for (int i = 0; i < 10; i++) {
+            assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100L));
+ }
+
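+        // stopping the master drops the cluster below minimum_master_nodes, so the remaining node must block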
+ cluster().stopCurrentMasterNode();
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ return state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK);
+ }
+ });
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(true));
+
+ logger.info("--> starting the previous master node again...");
+ cluster().startNode(settings);
+
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(2));
+ assertThat(state.metaData().indices().containsKey("test"), equalTo(true));
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+        logger.info("--> verify we got the data back");
+        for (int i = 0; i < 10; i++) {
+            assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100L));
+ }
+
+ cluster().stopRandomNonMasterNode();
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ return state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK);
+ }
+ }), equalTo(true));
+
+ logger.info("--> starting the previous master node again...");
+ cluster().startNode(settings);
+
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setWaitForGreenStatus().execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(false));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(2));
+ assertThat(state.metaData().indices().containsKey("test"), equalTo(true));
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+        logger.info("--> verify we got the data back");
+        for (int i = 0; i < 10; i++) {
+            assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100L));
+ }
+ }
+
+ @Test
+ public void multipleNodesShutdownNonMasterNodes() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("discovery.zen.minimum_master_nodes", 3)
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .put("gateway.type", "local")
+ .build();
+
+ logger.info("--> start first 2 nodes");
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ ClusterState state;
+
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ return state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK);
+ }
+ });
+
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ return state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK);
+ }
+ });
+
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(true));
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(true));
+
+ logger.info("--> start two more nodes");
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(4));
+
+ createIndex("test");
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ // make sure that all shards recovered before trying to flush
+ assertThat(client().admin().cluster().prepareHealth("test").setWaitForActiveShards(10).execute().actionGet().isTimedOut(), equalTo(false));
+ // flush for simpler debugging
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+        logger.info("--> verify we get the data back");
+        for (int i = 0; i < 10; i++) {
+            assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100L));
+ }
+
+ cluster().stopRandomNonMasterNode();
+ cluster().stopRandomNonMasterNode();
+
+ logger.info("--> verify that there is no master anymore on remaining nodes");
+ // spin here to wait till the state is set
+ assertNoMasterBlockOnAllNodes();
+
+ logger.info("--> start back the 2 nodes ");
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+        logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(4));
+
+        logger.info("--> verify we get the data back");
+        for (int i = 0; i < 10; i++) {
+            assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100L));
+ }
+ }
+
+ @Test
+ public void dynamicUpdateMinimumMasterNodes() throws InterruptedException {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .put("gateway.type", "local")
+ .build();
+
+ logger.info("--> start 2 nodes");
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+
+        // wait until the second node joins the cluster
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> setting minimum master node to 2");
+ setMinimumMasterNodes(2);
+
+ // make sure it has been processed on all nodes (master node spawns a secondary cluster state update task)
+ for (Client client : cluster()) {
+ assertThat(client.admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).get().isTimedOut(),
+ equalTo(false));
+ }
+
+ logger.info("--> stopping a node");
+ cluster().stopRandomNode();
+ logger.info("--> verifying min master node has effect");
+ assertNoMasterBlockOnAllNodes();
+
+ logger.info("--> bringing another node up");
+ cluster().startNode(settings);
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ }
+
+ private void assertNoMasterBlockOnAllNodes() throws InterruptedException {
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ boolean success = true;
+ for (Client client : cluster()) {
+ ClusterState state = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ success &= state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK);
+ if (logger.isDebugEnabled()) {
+ logger.debug("Checking for NO_MASTER_BLOCK on client: {} NO_MASTER_BLOCK: [{}]", client, state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK));
+ }
+ }
+ return success;
+ }
+ }, 20, TimeUnit.SECONDS), equalTo(true));
+ }
+}
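
The assertNoMasterBlockOnAllNodes helper above shows the spin-wait idiom these tests lean on: awaitBusy polls a Predicate until it holds or a timeout elapses. A minimal, self-contained sketch of such a polling loop follows; this is an illustration only, not the helper from the test base class, which may back off differently:

    import java.util.concurrent.TimeUnit;

    import com.google.common.base.Predicate;

    public final class AwaitBusySketch {

        // Poll the predicate with exponential backoff until it returns true or the timeout expires.
        public static boolean awaitBusy(Predicate<Object> predicate, long maxWait, TimeUnit unit) throws InterruptedException {
            long deadline = System.currentTimeMillis() + unit.toMillis(maxWait);
            long sleepMillis = 1;
            while (System.currentTimeMillis() < deadline) {
                if (predicate.apply(null)) {
                    return true;
                }
                Thread.sleep(sleepMillis);
                sleepMillis = Math.min(sleepMillis * 2, 100); // cap the backoff at 100ms
            }
            return predicate.apply(null); // one last check at the deadline
        }
    }
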
diff --git a/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java b/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java
new file mode 100644
index 0000000..94eb635
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.percolate.PercolateSourceBuilder;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.HashMap;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+/**
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class NoMasterNodeTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testNoMasterActions() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("action.auto_create_index", false)
+ .put("discovery.zen.minimum_master_nodes", 2)
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .put("index.number_of_shards", 1)
+ .build();
+
+ TimeValue timeout = TimeValue.timeValueMillis(200);
+
+ cluster().startNode(settings);
+ // start a second node, create an index, and then shut it down so we have no master block
+ cluster().startNode(settings);
+ createIndex("test");
+ client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+ cluster().stopRandomNode();
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ return state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK);
+ }
+ }), equalTo(true));
+
+ try {
+ client().prepareGet("test", "type1", "1").execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ try {
+ client().prepareMultiGet().add("test", "type1", "1").execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ try {
+ PercolateSourceBuilder percolateSource = new PercolateSourceBuilder();
+            percolateSource.percolateDocument().setDoc(new HashMap<String, Object>());
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(percolateSource).execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ long now = System.currentTimeMillis();
+ try {
+ client().prepareUpdate("test", "type1", "1").setScript("test script").setTimeout(timeout).execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
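+            // the update should have waited out its timeout before hitting the block; allow ~50ms of timer slack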
+ assertThat(System.currentTimeMillis() - now, greaterThan(timeout.millis() - 50));
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ try {
+ client().admin().indices().prepareAnalyze("test", "this is a test").execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ try {
+ client().prepareCount("test").execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ now = System.currentTimeMillis();
+ try {
+ client().prepareIndex("test", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout).execute().actionGet();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(System.currentTimeMillis() - now, greaterThan(timeout.millis() - 50));
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+ }
+}
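
Every try/fail/catch stanza in testNoMasterActions asserts the same outcome: the call is rejected with a ClusterBlockException carrying SERVICE_UNAVAILABLE. Since ClusterBlockException is unchecked, the repetition could be folded into one hypothetical helper; a sketch using only the imports already present in the test (assertBlocked is an invented name):

    // Hypothetical helper: run any client call and expect it to hit the no-master block.
    private void assertBlocked(Runnable request) {
        try {
            request.run();
            fail("Expected ClusterBlockException");
        } catch (ClusterBlockException e) {
            assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
        }
    }

    // usage, mirroring the first stanza above:
    assertBlocked(new Runnable() {
        @Override
        public void run() {
            client().prepareGet("test", "type1", "1").execute().actionGet();
        }
    });
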
diff --git a/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java b/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java
new file mode 100644
index 0000000..9a3f9dd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.CollectionAssertions;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Checking simple filtering capabilities of the cluster state
+ *
+ */
+public class SimpleClusterStateTests extends ElasticsearchIntegrationTest {
+
+ @Before
+ public void indexData() throws Exception {
+ index("foo", "bar", "1", XContentFactory.jsonBuilder().startObject().field("foo", "foo").endObject());
+ index("fuu", "buu", "1", XContentFactory.jsonBuilder().startObject().field("fuu", "fuu").endObject());
+ index("baz", "baz", "1", XContentFactory.jsonBuilder().startObject().field("baz", "baz").endObject());
+ refresh();
+ }
+
+ @Test
+ public void testRoutingTable() throws Exception {
+ ClusterStateResponse clusterStateResponseUnfiltered = client().admin().cluster().prepareState().clear().setRoutingTable(true).get();
+ assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("foo"), is(true));
+ assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("fuu"), is(true));
+ assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("baz"), is(true));
+ assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("non-existent"), is(false));
+
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().get();
+ assertThat(clusterStateResponse.getState().routingTable().hasIndex("foo"), is(false));
+ assertThat(clusterStateResponse.getState().routingTable().hasIndex("fuu"), is(false));
+ assertThat(clusterStateResponse.getState().routingTable().hasIndex("baz"), is(false));
+ assertThat(clusterStateResponse.getState().routingTable().hasIndex("non-existent"), is(false));
+ }
+
+ @Test
+ public void testNodes() throws Exception {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().setNodes(true).get();
+ assertThat(clusterStateResponse.getState().nodes().nodes().size(), is(cluster().size()));
+
+ ClusterStateResponse clusterStateResponseFiltered = client().admin().cluster().prepareState().clear().get();
+ assertThat(clusterStateResponseFiltered.getState().nodes().nodes().size(), is(0));
+ }
+
+ @Test
+ public void testMetadata() throws Exception {
+ ClusterStateResponse clusterStateResponseUnfiltered = client().admin().cluster().prepareState().clear().setMetaData(true).get();
+ assertThat(clusterStateResponseUnfiltered.getState().metaData().indices().size(), is(3));
+
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().get();
+ assertThat(clusterStateResponse.getState().metaData().indices().size(), is(0));
+ }
+
+ @Test
+ public void testIndexTemplates() throws Exception {
+ client().admin().indices().preparePutTemplate("foo_template")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ client().admin().indices().preparePutTemplate("fuu_template")
+ .setTemplate("test*")
+ .setOrder(1)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ ClusterStateResponse clusterStateResponseUnfiltered = client().admin().cluster().prepareState().get();
+ assertThat(clusterStateResponseUnfiltered.getState().metaData().templates().size(), is(greaterThanOrEqualTo(2)));
+
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).setIndexTemplates("foo_template").get();
+ assertThat(clusterStateResponse.getState().metaData().templates().size(), is(1));
+ }
+
+ @Test
+ public void testThatFilteringByIndexWorksForMetadataAndRoutingTable() throws Exception {
+ ClusterStateResponse clusterStateResponseFiltered = client().admin().cluster().prepareState().clear()
+ .setMetaData(true).setRoutingTable(true).setIndices("foo", "fuu", "non-existent").get();
+
+ // metadata
+ assertThat(clusterStateResponseFiltered.getState().metaData().indices().size(), is(2));
+ assertThat(clusterStateResponseFiltered.getState().metaData().indices(), CollectionAssertions.hasKey("foo"));
+ assertThat(clusterStateResponseFiltered.getState().metaData().indices(), CollectionAssertions.hasKey("fuu"));
+
+ // routing table
+ assertThat(clusterStateResponseFiltered.getState().routingTable().hasIndex("foo"), is(true));
+ assertThat(clusterStateResponseFiltered.getState().routingTable().hasIndex("fuu"), is(true));
+ assertThat(clusterStateResponseFiltered.getState().routingTable().hasIndex("baz"), is(false));
+ }
+}
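
All of these tests follow one shape: prepareState().clear() drops every section of the cluster state, then setters such as setRoutingTable, setNodes, setMetaData and setIndices opt sections back in. A sketch of that pattern as a reusable check, where assertRoutingTableContains is a hypothetical helper name:

    // Hypothetical helper: assert the given indices are present in a filtered routing table.
    private void assertRoutingTableContains(ClusterStateResponse response, String... indices) {
        for (String index : indices) {
            assertThat("routing table should contain " + index,
                    response.getState().routingTable().hasIndex(index), is(true));
        }
    }

    // usage, mirroring testRoutingTable above:
    ClusterStateResponse filtered = client().admin().cluster().prepareState().clear().setRoutingTable(true).get();
    assertRoutingTableContains(filtered, "foo", "fuu", "baz");
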
diff --git a/src/test/java/org/elasticsearch/cluster/SimpleDataNodesTests.java b/src/test/java/org/elasticsearch/cluster/SimpleDataNodesTests.java
new file mode 100644
index 0000000..cb23803
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/SimpleDataNodesTests.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.action.UnavailableShardsException;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class SimpleDataNodesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testDataNodes() throws Exception {
+ cluster().startNode(settingsBuilder().put("node.data", false).build());
+ client().admin().indices().create(createIndexRequest("test")).actionGet();
+ try {
+ client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")).timeout(timeValueSeconds(1))).actionGet();
+ fail("no allocation should happen");
+ } catch (UnavailableShardsException e) {
+ // all is well
+ }
+
+ cluster().startNode(settingsBuilder().put("node.data", false).build());
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false));
+
+ // still no shard should be allocated
+ try {
+ client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")).timeout(timeValueSeconds(1))).actionGet();
+ fail("no allocation should happen");
+ } catch (UnavailableShardsException e) {
+ // all is well
+ }
+
+        // now, start a data node, and see that shards get allocated to it
+ cluster().startNode(settingsBuilder().put("node.data", true).build());
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false));
+
+ IndexResponse indexResponse = client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ assertThat(indexResponse.getId(), equalTo("1"));
+ assertThat(indexResponse.getType(), equalTo("type1"));
+ }
+
+ private String source(String id, String nameValue) {
+ return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
+ }
+}
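
The source helper above assembles its document by string concatenation, with an unquoted type1 key that the content parser used here evidently tolerates. An equivalent construction with XContentFactory, already used elsewhere in this patch, would be a sketch like:

    import java.io.IOException;

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    // Build the same { "type1" : { "id" : ..., "name" : ... } } document with the JSON builder.
    private XContentBuilder source(String id, String nameValue) throws IOException {
        return XContentFactory.jsonBuilder().startObject()
                .startObject("type1")
                    .field("id", id)
                    .field("name", nameValue)
                .endObject()
            .endObject();
    }
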
diff --git a/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesTests.java b/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesTests.java
new file mode 100644
index 0000000..521c2fc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesTests.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.discovery.MasterNotDiscoveredException;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class SpecificMasterNodesTests extends ElasticsearchIntegrationTest {
+
+ protected final ImmutableSettings.Builder settingsBuilder() {
+ return ImmutableSettings.builder().put("discovery.type", "zen");
+ }
+
+ @Test
+ public void simpleOnlyMasterNodeElection() {
+ logger.info("--> start data node / non master node");
+ cluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false).put("discovery.initial_state_timeout", "1s"));
+ try {
+ assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
+ fail("should not be able to find master");
+ } catch (MasterNotDiscoveredException e) {
+ // all is well, no master elected
+ }
+ logger.info("--> start master node");
+ final String masterNodeName = cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+ assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+ assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+
+ logger.info("--> stop master node");
+ cluster().stopCurrentMasterNode();
+
+ try {
+ assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
+ fail("should not be able to find master");
+ } catch (MasterNotDiscoveredException e) {
+ // all is well, no master elected
+ }
+
+ logger.info("--> start master node");
+        final String nextMasterEligibleNodeName = cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+        assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
+        assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
+ }
+
+ @Test
+ public void electOnlyBetweenMasterNodes() {
+ logger.info("--> start data node / non master node");
+ cluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false).put("discovery.initial_state_timeout", "1s"));
+ try {
+ assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
+ fail("should not be able to find master");
+ } catch (MasterNotDiscoveredException e) {
+ // all is well, no master elected
+ }
+ logger.info("--> start master node (1)");
+ final String masterNodeName = cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+ assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+ assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+
+ logger.info("--> start master node (2)");
+        final String nextMasterEligibleNodeName = cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+ assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+ assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+ assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+
+ logger.info("--> closing master node (1)");
+ cluster().stopCurrentMasterNode();
+        assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
+        assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
+ }
+
+ /**
+ * Tests that putting custom default mapping and then putting a type mapping will have the default mapping merged
+ * to the type mapping.
+ */
+ @Test
+ public void testCustomDefaultMapping() throws Exception {
+ logger.info("--> start master node / non data");
+ cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+
+ logger.info("--> start data node / non master node");
+ cluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false));
+
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings("number_of_shards", 1).get());
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("_default_").setSource("_timestamp", "enabled=true"));
+
+ MappingMetaData defaultMapping = client().admin().cluster().prepareState().get().getState().getMetaData().getIndices().get("test").getMappings().get("_default_");
+ assertThat(defaultMapping.getSourceAsMap().get("_timestamp"), notNullValue());
+
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("_default_").setSource("_timestamp", "enabled=true"));
+
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("type1").setSource("foo", "enabled=true"));
+ MappingMetaData type1Mapping = client().admin().cluster().prepareState().get().getState().getMetaData().getIndices().get("test").getMappings().get("type1");
+ assertThat(type1Mapping.getSourceAsMap().get("_timestamp"), notNullValue());
+ }
+
+ @Test
+ public void testAliasFilterValidation() throws Exception {
+ logger.info("--> start master node / non data");
+ cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+
+ logger.info("--> start data node / non master node");
+ cluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false));
+
+ assertAcked(prepareCreate("test").addMapping("type1", "{\"type1\" : {\"properties\" : {\"table_a\" : { \"type\" : \"nested\", \"properties\" : {\"field_a\" : { \"type\" : \"string\" },\"field_b\" :{ \"type\" : \"string\" }}}}}}"));
+ client().admin().indices().prepareAliases().addAlias("test", "a_test", FilterBuilders.nestedFilter("table_a", FilterBuilders.termFilter("table_a.field_b", "y"))).get();
+ }
+}
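
These tests repeat the same two settings combinations: node.master=true with node.data=false for a dedicated master, and the inverse for a pure data node. Two hypothetical factory helpers, built on the file's own settingsBuilder(), would keep that explicit; a sketch:

    // Hypothetical helpers: start dedicated master-eligible and data-only nodes.
    private String startDedicatedMasterNode() {
        return cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
    }

    private String startDataOnlyNode() {
        return cluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false));
    }
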
diff --git a/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java b/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java
new file mode 100644
index 0000000..1e1274f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class UpdateSettingsValidationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testUpdateSettingsValidation() throws Exception {
+ String master = cluster().startNode(settingsBuilder().put("node.data", false).build());
+ String node_1 = cluster().startNode(settingsBuilder().put("node.master", false).build());
+ String node_2 = cluster().startNode(settingsBuilder().put("node.master", false).build());
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1)).execute().actionGet();
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(10));
+
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 0)).execute().actionGet();
+ healthResponse = client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(5));
+
+ try {
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.refresh_interval", "")).execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException ex) {
+ logger.info("Error message: [{}]", ex.getMessage());
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/ZenUnicastDiscoveryTests.java b/src/test/java/org/elasticsearch/cluster/ZenUnicastDiscoveryTests.java
new file mode 100644
index 0000000..b05e410
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/ZenUnicastDiscoveryTests.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@LuceneTestCase.Slow
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numNodes = 0)
+public class ZenUnicastDiscoveryTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ @TestLogging("discovery.zen:TRACE")
+    // The zen unicast ping override bug may rarely manifest itself; it is very timing dependent.
+ // Without the fix in UnicastZenPing, this test fails roughly 1 out of 10 runs from the command line.
+ public void testMasterElectionNotMissed() throws Exception {
+ final Settings settings = settingsBuilder()
+ // Failure only manifests if multicast ping is disabled!
+ .put("discovery.zen.ping.multicast.ping.enabled", false)
+ .put("discovery.zen.minimum_master_nodes", 2)
+                // Can't use this, b/c at the moment all nodes will only ping localhost:9300
+// .put("discovery.zen.ping.unicast.hosts", "localhost")
+ .put("discovery.zen.ping.unicast.hosts", "localhost:15300,localhost:15301,localhost:15302")
+ .put("transport.tcp.port", "15300-15400")
+ .build();
+
+ final CountDownLatch latch = new CountDownLatch(3);
+ final AtomicArray<String> nodes = new AtomicArray<String>(3);
+ Runnable r1 = new Runnable() {
+
+ @Override
+ public void run() {
+ logger.info("--> start first node");
+ nodes.set(0, cluster().startNode(settings));
+ latch.countDown();
+ }
+ };
+ new Thread(r1).start();
+
+ sleep(between(500, 3000));
+ Runnable r2 = new Runnable() {
+
+ @Override
+ public void run() {
+ logger.info("--> start second node");
+ nodes.set(1, cluster().startNode(settings));
+ latch.countDown();
+ }
+ };
+ new Thread(r2).start();
+
+ sleep(between(500, 3000));
+ Runnable r3 = new Runnable() {
+
+ @Override
+ public void run() {
+ logger.info("--> start third node");
+ nodes.set(2, cluster().startNode(settings));
+ latch.countDown();
+ }
+ };
+ new Thread(r3).start();
+ latch.await();
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ DiscoveryNode masterDiscoNode = null;
+ for (String node : nodes.toArray(new String[3])) {
+ ClusterState state = cluster().client(node).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(3));
+ if (masterDiscoNode == null) {
+ masterDiscoNode = state.nodes().masterNode();
+ } else {
+ assertThat(masterDiscoNode.equals(state.nodes().masterNode()), equalTo(true));
+ }
+ }
+ }
+
+}
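
The three near-identical Runnable blocks stagger node startup to widen the window in which the unicast ping race can occur. The same staggering could be written as a loop; a sketch that keeps the latch, the AtomicArray and the random sleeps of the original:

    final CountDownLatch latch = new CountDownLatch(3);
    final AtomicArray<String> nodes = new AtomicArray<String>(3);
    for (int i = 0; i < 3; i++) {
        final int ordinal = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                logger.info("--> start node {}", ordinal);
                nodes.set(ordinal, cluster().startNode(settings));
                latch.countDown();
            }
        }).start();
        if (ordinal < 2) {
            sleep(between(500, 3000)); // stagger the next start, as the original does
        }
    }
    latch.await();
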
diff --git a/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java b/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java
new file mode 100644
index 0000000..a2e9d6d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.ack;
+
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.TEST;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = TEST)
+public class AckClusterUpdateSettingsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+        //to test that the acknowledgement mechanism works we disable the wait for publish,
+        //otherwise the operation would most likely be acknowledged even if it didn't support acks
+ return ImmutableSettings.builder().put("discovery.zen.publish_timeout", 0).build();
+ }
+
+ @Test
+ public void testClusterUpdateSettingsAcknowledgement() {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("number_of_shards", atLeast(cluster().size()))
+ .put("number_of_replicas", 0)).get();
+ ensureGreen();
+
+ NodesInfoResponse nodesInfo = client().admin().cluster().prepareNodesInfo().get();
+ String excludedNodeId = null;
+ for (NodeInfo nodeInfo : nodesInfo) {
+ if (nodeInfo.getNode().isDataNode()) {
+                excludedNodeId = nodeInfo.getNode().id();
+ break;
+ }
+ }
+ assertNotNull(excludedNodeId);
+
+ ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._id", excludedNodeId)).get();
+ assertAcked(clusterUpdateSettingsResponse);
+ assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId));
+
+ for (Client client : clients()) {
+ ClusterState clusterState = getLocalClusterState(client);
+ assertThat(clusterState.routingNodes().metaData().transientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId));
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if (clusterState.nodes().get(shardRouting.currentNodeId()).id().equals(excludedNodeId)) {
+ //if the shard is still there it must be relocating and all nodes need to know, since the request was acknowledged
+ assertThat(shardRouting.relocating(), equalTo(true));
+ }
+ }
+ }
+ }
+ }
+
+ //let's wait for the relocation to be completed, otherwise there can be issues with after test checks (mock directory wrapper etc.)
+ waitForRelocation();
+
+ //removes the allocation exclude settings
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._id", "")).get();
+ }
+
+ @Test
+ public void testClusterUpdateSettingsNoAcknowledgement() {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("number_of_shards", atLeast(cluster().size()))
+ .put("number_of_replicas", 0)).get();
+ ensureGreen();
+
+ NodesInfoResponse nodesInfo = client().admin().cluster().prepareNodesInfo().get();
+ String excludedNodeId = null;
+ for (NodeInfo nodeInfo : nodesInfo) {
+ if (nodeInfo.getNode().isDataNode()) {
+                excludedNodeId = nodeInfo.getNode().id();
+ break;
+ }
+ }
+ assertNotNull(excludedNodeId);
+
+ ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster().prepareUpdateSettings().setTimeout("0s")
+ .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._id", excludedNodeId)).get();
+ assertThat(clusterUpdateSettingsResponse.isAcknowledged(), equalTo(false));
+ assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId));
+
+ //let's wait for the relocation to be completed, otherwise there can be issues with after test checks (mock directory wrapper etc.)
+ waitForRelocation();
+
+ //removes the allocation exclude settings
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._id", "")).get();
+ }
+
+ private static ClusterState getLocalClusterState(Client client) {
+ return client.admin().cluster().prepareState().setLocal(true).get().getState();
+ }
+}
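
Both tests rely on the same two-sided trick: with discovery.zen.publish_timeout set to 0 the master does not wait for other nodes to apply a new cluster state, so only operations that genuinely support acknowledgements come back acknowledged, while a request timeout of 0s forces the not-acknowledged path. A sketch of the second half as a hypothetical reusable check:

    // Hypothetical helper: fire the exclude-node update with a zero timeout and expect a negative ack.
    private void assertExcludeNotAcknowledgedWithZeroTimeout(String excludedNodeId) {
        ClusterUpdateSettingsResponse response = client().admin().cluster().prepareUpdateSettings()
                .setTimeout("0s")
                .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._id", excludedNodeId))
                .get();
        assertThat(response.isAcknowledged(), equalTo(false));
    }
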
diff --git a/src/test/java/org/elasticsearch/cluster/ack/AckTests.java b/src/test/java/org/elasticsearch/cluster/ack/AckTests.java
new file mode 100644
index 0000000..f13fd70
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/ack/AckTests.java
@@ -0,0 +1,422 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.ack;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.SUITE;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(scope = SUITE)
+public class AckTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+        //to test that the acknowledgement mechanism works we disable the wait for publish,
+        //otherwise the operation would most likely be acknowledged even if it didn't support acks
+ return ImmutableSettings.builder().put("discovery.zen.publish_timeout", 0).build();
+ }
+
+ @Test
+ public void testUpdateSettingsAcknowledgement() {
+ createIndex("test");
+
+ assertAcked(client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.builder().put("refresh_interval", 9999)));
+
+ for (Client client : clients()) {
+ String refreshInterval = getLocalClusterState(client).metaData().index("test").settings().get("index.refresh_interval");
+ assertThat(refreshInterval, equalTo("9999"));
+ }
+ }
+
+ @Test
+ public void testUpdateSettingsNoAcknowledgement() {
+ createIndex("test");
+
+ UpdateSettingsResponse updateSettingsResponse = client().admin().indices().prepareUpdateSettings("test").setTimeout("0s")
+ .setSettings(ImmutableSettings.builder().put("refresh_interval", 9999)).get();
+ assertThat(updateSettingsResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testPutWarmerAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
+
+ for (Client client : clients()) {
+ GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
+ assertThat(getWarmersResponse.warmers().size(), equalTo(1));
+ ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next();
+ assertThat(entry.key, equalTo("test"));
+ assertThat(entry.value.size(), equalTo(1));
+ assertThat(entry.value.get(0).name(), equalTo("custom_warmer"));
+ }
+ }
+
+ @Test
+ public void testPutWarmerNoAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer").setTimeout("0s")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .get();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testDeleteWarmerAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
+
+ assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer"));
+
+ for (Client client : clients()) {
+ GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
+ assertThat(getWarmersResponse.warmers().size(), equalTo(0));
+ }
+ }
+
+ @Test
+ public void testDeleteWarmerNoAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+        assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
+                .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
+
+        DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer()
+                .setIndices("test").setNames("custom_warmer").setTimeout("0s").get();
+        assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testDeleteMappingAcknowledgement() {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string").get();
+ ensureGreen();
+
+        client().prepareIndex("test", "type1").setSource("field1", "value1").get();
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").addTypes("type1").get();
+ assertThat(getMappingsResponse.mappings().get("test").get("type1"), notNullValue());
+
+ assertAcked(client().admin().indices().prepareDeleteMapping("test").setType("type1"));
+
+ for (Client client : clients()) {
+ getMappingsResponse = client.admin().indices().prepareGetMappings("test").addTypes("type1").setLocal(true).get();
+ assertThat(getMappingsResponse.mappings().size(), equalTo(0));
+ }
+ }
+
+ @Test
+ public void testDeleteMappingNoAcknowledgement() {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string").get();
+ ensureGreen();
+
+        client().prepareIndex("test", "type1").setSource("field1", "value1").get();
+
+ DeleteMappingResponse deleteMappingResponse = client().admin().indices().prepareDeleteMapping("test").setTimeout("0s").setType("type1").get();
+ assertThat(deleteMappingResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testClusterRerouteAcknowledgement() throws InterruptedException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("number_of_shards", atLeast(cluster().size()))
+ .put("number_of_replicas", 0)).get();
+ ensureGreen();
+
+ MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
+
+ assertAcked(client().admin().cluster().prepareReroute().add(moveAllocationCommand));
+
+ for (Client client : clients()) {
+ ClusterState clusterState = getLocalClusterState(client);
+ for (MutableShardRouting mutableShardRouting : clusterState.routingNodes().routingNodeIter(moveAllocationCommand.fromNode())) {
+ //if the shard that we wanted to move is still on the same node, it must be relocating
+ if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
+ assertThat(mutableShardRouting.relocating(), equalTo(true));
+ }
+
+ }
+
+ boolean found = false;
+ for (MutableShardRouting mutableShardRouting : clusterState.routingNodes().routingNodeIter(moveAllocationCommand.toNode())) {
+ if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
+ assertThat(mutableShardRouting.state(), anyOf(equalTo(ShardRoutingState.INITIALIZING), equalTo(ShardRoutingState.STARTED)));
+ found = true;
+ break;
+ }
+ }
+ assertThat(found, equalTo(true));
+ }
+ //let's wait for the relocation to be completed, otherwise there can be issues with after test checks (mock directory wrapper etc.)
+ waitForRelocation();
+ }
+
+ @Test
+ public void testClusterRerouteNoAcknowledgement() throws InterruptedException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("number_of_shards", atLeast(cluster().size()))
+ .put("number_of_replicas", 0)).get();
+ ensureGreen();
+
+ MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
+
+ ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s").add(moveAllocationCommand).get();
+ assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testClusterRerouteAcknowledgementDryRun() throws InterruptedException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("number_of_shards", atLeast(cluster().size()))
+ .put("number_of_replicas", 0)).get();
+ ensureGreen();
+
+ MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
+
+ assertAcked(client().admin().cluster().prepareReroute().setDryRun(true).add(moveAllocationCommand));
+
+        //testing only on the master with the latest cluster state: the dry run made no change, so there was nothing to acknowledge
+        //and we cannot guarantee that all nodes hold the same cluster state version
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
+ boolean found = false;
+ for (MutableShardRouting mutableShardRouting : clusterStateResponse.getState().routingNodes().routingNodeIter(moveAllocationCommand.fromNode())) {
+            //the shard that we wanted to move is still on the same node, as we used the dryRun flag
+ if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
+ assertThat(mutableShardRouting.started(), equalTo(true));
+ found = true;
+ break;
+ }
+ }
+ assertThat(found, equalTo(true));
+
+ for (MutableShardRouting mutableShardRouting : clusterStateResponse.getState().routingNodes().routingNodeIter(moveAllocationCommand.toNode())) {
+ if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
+ fail("shard [" + mutableShardRouting + "] shouldn't be on node [" + moveAllocationCommand.toString() + "]");
+ }
+ }
+ }
+
+ @Test
+ public void testClusterRerouteNoAcknowledgementDryRun() throws InterruptedException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("number_of_shards", atLeast(cluster().size()))
+ .put("number_of_replicas", 0)).get();
+ ensureGreen();
+
+ MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
+
+ ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s").setDryRun(true).add(moveAllocationCommand).get();
+ //acknowledged anyway as no changes were made
+ assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(true));
+ }
+
+ private MoveAllocationCommand getAllocationCommand() {
+ String fromNodeId = null;
+ String toNodeId = null;
+ MutableShardRouting shardToBeMoved = null;
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
+ for (RoutingNode routingNode : clusterStateResponse.getState().routingNodes()) {
+ if (routingNode.node().isDataNode()) {
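+                // the first data node that owns at least one shard becomes the source; any other data node becomes the target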
+ if (fromNodeId == null && routingNode.numberOfOwningShards() > 0) {
+ fromNodeId = routingNode.nodeId();
+ shardToBeMoved = routingNode.get(randomInt(routingNode.size() - 1));
+ } else {
+ toNodeId = routingNode.nodeId();
+ }
+
+ if (toNodeId != null && fromNodeId != null) {
+ break;
+ }
+ }
+ }
+
+ assertNotNull(fromNodeId);
+ assertNotNull(toNodeId);
+ assertNotNull(shardToBeMoved);
+
+        logger.info("--> going to move shard [{}] from [{}] to [{}]", shardToBeMoved, fromNodeId, toNodeId);
+ return new MoveAllocationCommand(shardToBeMoved.shardId(), fromNodeId, toNodeId);
+ }
+
+ @Test
+ public void testIndicesAliasesAcknowledgement() {
+ createIndex("test");
+
+ //testing acknowledgement when trying to submit an existing alias too
+ //in that case it would not make any change, but we are sure about the cluster state
+ //as the previous operation was acknowledged
+ for (int i = 0; i < 2; i++) {
+ assertAcked(client().admin().indices().prepareAliases().addAlias("test", "alias"));
+
+ for (Client client : clients()) {
+ AliasMetaData aliasMetaData = getLocalClusterState(client).metaData().aliases().get("alias").get("test");
+ assertThat(aliasMetaData.alias(), equalTo("alias"));
+ }
+ }
+ }
+
+ @Test
+ public void testIndicesAliasesNoAcknowledgement() {
+ createIndex("test");
+
+ IndicesAliasesResponse indicesAliasesResponse = client().admin().indices().prepareAliases().addAlias("test", "alias").setTimeout("0s").get();
+ assertThat(indicesAliasesResponse.isAcknowledged(), equalTo(false));
+ }
+
+    @Test
+    public void testCloseIndexAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().prepareClose("test"));
+
+ for (Client client : clients()) {
+ IndexMetaData indexMetaData = getLocalClusterState(client).metaData().indices().get("test");
+ assertThat(indexMetaData.getState(), equalTo(IndexMetaData.State.CLOSE));
+ }
+ }
+
+ @Test
+ public void testCloseIndexNoAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").setTimeout("0s").get();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testOpenIndexAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().prepareClose("test"));
+
+ assertAcked(client().admin().indices().prepareOpen("test"));
+
+ for (Client client : clients()) {
+ IndexMetaData indexMetaData = getLocalClusterState(client).metaData().indices().get("test");
+ assertThat(indexMetaData.getState(), equalTo(IndexMetaData.State.OPEN));
+ }
+ }
+
+ @Test
+ public void testOpenIndexNoAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+
+ OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen("test").setTimeout("0s").get();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testPutMappingAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=string,index=not_analyzed"));
+
+ for (Client client : clients()) {
+ assertThat(getLocalClusterState(client).metaData().indices().get("test").mapping("test"), notNullValue());
+ }
+ }
+
+ @Test
+ public void testPutMappingNoAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=string,index=not_analyzed").setTimeout("0s").get();
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testCreateIndexAcknowledgement() {
+ createIndex("test");
+
+ for (Client client : clients()) {
+ assertThat(getLocalClusterState(client).metaData().indices().containsKey("test"), equalTo(true));
+ }
+
+        //let's wait for green, otherwise there can be issues with the after-test checks (mock directory wrapper etc.)
+        //but we do want to verify that the new index appears in the cluster state of all nodes even before green
+ ensureGreen();
+ }
+
+ @Test
+ public void testCreateIndexNoAcknowledgement() {
+ CreateIndexResponse createIndexResponse = client().admin().indices().prepareCreate("test").setTimeout("0s").get();
+ assertThat(createIndexResponse.isAcknowledged(), equalTo(false));
+
+        //let's wait for green, otherwise there can be issues with the after-test checks (mock directory wrapper etc.)
+ ensureGreen();
+ }
+
+ private static ClusterState getLocalClusterState(Client client) {
+ return client.admin().cluster().prepareState().setLocal(true).get().getState();
+ }
+}
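A note on the pattern above: every *NoAcknowledgement test relies on setTimeout("0s"), which makes the master apply the change but return before the other nodes confirm the new cluster state, so isAcknowledged() is false whenever a real change was made (and true when nothing had to change, as in the dry-run reroute). A minimal sketch of that pattern as a hypothetical helper, not part of the patch, reusing only calls that appear in the tests above:

    // hypothetical helper; the method name is illustrative
    private void assertAliasUpdateNotAckedWithZeroTimeout(String index, String alias) {
        IndicesAliasesResponse response = client().admin().indices()
                .prepareAliases()
                .addAlias(index, alias)
                .setTimeout("0s") // return before the other nodes ack the cluster state update
                .get();
        // the change is still applied eventually; only the acknowledgement is skipped
        assertThat(response.isAcknowledged(), equalTo(false));
    }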
diff --git a/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java b/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java
new file mode 100644
index 0000000..a63d885
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java
@@ -0,0 +1,228 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.allocation;
+
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class AwarenessAllocationTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(AwarenessAllocationTests.class);
+
+ @Test
+ public void testSimpleAwareness() throws Exception {
+ Settings commonSettings = ImmutableSettings.settingsBuilder()
+ .put("cluster.routing.schedule", "10ms")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build();
+
+
+ logger.info("--> starting 2 nodes on the same rack");
+ cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_1").build());
+ cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_1").build());
+
+ createIndex("test1");
+ createIndex("test2");
+
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> starting 1 node on a different rack");
+ String node3 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_2").build());
+
+ long start = System.currentTimeMillis();
+ ObjectIntOpenHashMap<String> counts;
+ // On slow machines the initial relocation might be delayed
+ do {
+ Thread.sleep(100);
+ logger.info("--> waiting for no relocation");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForRelocatingShards(0).execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> checking current state");
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ // verify that we have 10 shards on node3
+ counts = new ObjectIntOpenHashMap<String>();
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+ } while (counts.get(node3) != 10 && (System.currentTimeMillis() - start) < 10000);
+ assertThat(counts.get(node3), equalTo(10));
+ }
+
+ @Test
+ @Slow
+ public void testAwarenessZones() throws InterruptedException {
+ Settings commonSettings = ImmutableSettings.settingsBuilder()
+ .put("cluster.routing.allocation.awareness.force.zone.values", "a,b")
+ .put("cluster.routing.allocation.awareness.attributes", "zone")
+ .build();
+
+ logger.info("--> starting 6 nodes on different zones");
+ String A_0 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "a").build());
+ String B_0 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
+ String B_1 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
+ String A_1 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "a").build());
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 5)
+ .put("index.number_of_replicas", 1)).execute().actionGet();
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForRelocatingShards(0).execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ ObjectIntOpenHashMap<String> counts = new ObjectIntOpenHashMap<String>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+ assertThat(counts.get(A_1), anyOf(equalTo(2),equalTo(3)));
+ assertThat(counts.get(B_1), anyOf(equalTo(2),equalTo(3)));
+ assertThat(counts.get(A_0), anyOf(equalTo(2),equalTo(3)));
+ assertThat(counts.get(B_0), anyOf(equalTo(2),equalTo(3)));
+ }
+
+ @Test
+ @Slow
+ public void testAwarenessZonesIncrementalNodes() throws InterruptedException {
+ Settings commonSettings = ImmutableSettings.settingsBuilder()
+ .put("cluster.routing.allocation.awareness.force.zone.values", "a,b")
+ .put("cluster.routing.allocation.awareness.attributes", "zone")
+ .build();
+
+
+ logger.info("--> starting 2 nodes on zones 'a' & 'b'");
+ String A_0 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "a").build());
+ String B_0 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 5)
+ .put("index.number_of_replicas", 1)).execute().actionGet();
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").setWaitForRelocatingShards(0).execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ ObjectIntOpenHashMap<String> counts = new ObjectIntOpenHashMap<String>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+ assertThat(counts.get(A_0), equalTo(5));
+ assertThat(counts.get(B_0), equalTo(5));
+ logger.info("--> starting another node in zone 'b'");
+
+ String B_1 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ client().admin().cluster().prepareReroute().get();
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
+
+ assertThat(health.isTimedOut(), equalTo(false));
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ counts = new ObjectIntOpenHashMap<String>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+ assertThat(counts.get(A_0), equalTo(5));
+ assertThat(counts.get(B_0), equalTo(3));
+ assertThat(counts.get(B_1), equalTo(2));
+
+ String noZoneNode = cluster().startNode();
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ client().admin().cluster().prepareReroute().get();
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
+
+ assertThat(health.isTimedOut(), equalTo(false));
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ counts = new ObjectIntOpenHashMap<String>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+
+ assertThat(counts.get(A_0), equalTo(5));
+ assertThat(counts.get(B_0), equalTo(3));
+ assertThat(counts.get(B_1), equalTo(2));
+ assertThat(counts.containsKey(noZoneNode), equalTo(false));
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(ImmutableSettings.settingsBuilder().put("cluster.routing.allocation.awareness.attributes", "").build()).get();
+
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
+
+ assertThat(health.isTimedOut(), equalTo(false));
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ counts = new ObjectIntOpenHashMap<String>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+
+ assertThat(counts.get(A_0), equalTo(3));
+ assertThat(counts.get(B_0), equalTo(3));
+ assertThat(counts.get(B_1), equalTo(2));
+ assertThat(counts.get(noZoneNode), equalTo(2));
+ }
+}
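The per-node counting loop in the awareness tests above is repeated five times verbatim. A small sketch of the obvious extraction, with a hypothetical helper name and the loop body unchanged:

    private ObjectIntOpenHashMap<String> countShardsPerNode(ClusterState clusterState) {
        ObjectIntOpenHashMap<String> counts = new ObjectIntOpenHashMap<String>();
        for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
            for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                for (ShardRouting shardRouting : indexShardRoutingTable) {
                    // key each shard copy by the name of the node currently holding it
                    counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
                }
            }
        }
        return counts;
    }

Each call site would then collapse to counts = countShardsPerNode(clusterState);.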
diff --git a/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java b/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java
new file mode 100644
index 0000000..3b96965
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.allocation;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.command.AllocateAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.Arrays;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class ClusterRerouteTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(ClusterRerouteTests.class);
+
+ @Test
+ public void rerouteWithCommands_disableAllocationSettings() throws Exception {
+ Settings commonSettings = settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .build();
+ rerouteWithCommands(commonSettings);
+ }
+
+ @Test
+ public void rerouteWithCommands_enableAllocationSettings() throws Exception {
+ Settings commonSettings = settingsBuilder()
+ .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE.name())
+ .put("gateway.type", "local")
+ .build();
+ rerouteWithCommands(commonSettings);
+ }
+
+ private void rerouteWithCommands(Settings commonSettings) throws Exception {
+ String node_1 = cluster().startNode(commonSettings);
+ String node_2 = cluster().startNode(commonSettings);
+
+ logger.info("--> create an index with 1 shard, 1 replica, nothing should allocate");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1))
+ .execute().actionGet();
+
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(2));
+
+ logger.info("--> explicitly allocate shard 1, *under dry_run*");
+ state = client().admin().cluster().prepareReroute()
+ .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
+ .setDryRun(true)
+ .execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ logger.info("--> get the state, verify nothing changed because of the dry run");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(2));
+
+ logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
+ state = client().admin().cluster().prepareReroute()
+ .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
+ .execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> get the state, verify shard 1 primary allocated");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+
+ logger.info("--> move shard 1 primary from node1 to node2");
+ state = client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2))
+ .execute().actionGet().getState();
+
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.RELOCATING));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_2).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForRelocatingShards(0).execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> get the state, verify shard 1 primary moved from node1 to node2");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_2).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+ }
+
+ @Test
+ public void rerouteWithAllocateLocalGateway_disableAllocationSettings() throws Exception {
+ Settings commonSettings = settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .put("gateway.type", "local")
+ .build();
+ rerouteWithAllocateLocalGateway(commonSettings);
+ }
+
+ @Test
+ public void rerouteWithAllocateLocalGateway_enableAllocationSettings() throws Exception {
+ Settings commonSettings = settingsBuilder()
+ .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE.name())
+ .put("gateway.type", "local")
+ .build();
+ rerouteWithAllocateLocalGateway(commonSettings);
+ }
+
+ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exception {
+ logger.info("--> starting 2 nodes");
+ String node_1 = cluster().startNode(commonSettings);
+ cluster().startNode(commonSettings);
+ assertThat(cluster().size(), equalTo(2));
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> create an index with 1 shard, 1 replica, nothing should allocate");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1))
+ .execute().actionGet();
+
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(2));
+
+ logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
+ state = client().admin().cluster().prepareReroute()
+ .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
+ .execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> get the state, verify shard 1 primary allocated");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).execute().actionGet();
+
+ logger.info("--> closing all nodes");
+ File[] shardLocation = cluster().getInstance(NodeEnvironment.class, node_1).shardLocations(new ShardId("test", 0));
+ assertThat(FileSystemUtils.exists(shardLocation), equalTo(true)); // make sure the data is there!
+        cluster().closeNonSharedNodes(false); // don't wipe data directories; the index needs to be there!
+
+ logger.info("--> deleting the shard data [{}] ", Arrays.toString(shardLocation));
+ assertThat(FileSystemUtils.exists(shardLocation), equalTo(true)); // verify again after cluster was shut down
+ assertThat(FileSystemUtils.deleteRecursively(shardLocation), equalTo(true));
+
+ logger.info("--> starting nodes back, will not allocate the shard since it has no data, but the index will be there");
+ node_1 = cluster().startNode(commonSettings);
+ cluster().startNode(commonSettings);
+ // wait a bit for the cluster to realize that the shard is not there...
+ // TODO can we get around this? the cluster is RED, so what do we wait for?
+ client().admin().cluster().prepareReroute().get();
+ assertThat(client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet().getStatus(), equalTo(ClusterHealthStatus.RED));
+ logger.info("--> explicitly allocate primary");
+ state = client().admin().cluster().prepareReroute()
+ .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
+ .execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> get the state, verify shard 1 primary allocated");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+
+ }
+
+} \ No newline at end of file
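A condensed sketch of the dry-run sequence exercised in rerouteWithCommands above, assuming the same two-node, one-shard setup: the reroute response already carries the prospective routing, while a fresh state request shows that nothing was published:

    ClusterState dryRun = client().admin().cluster().prepareReroute()
            .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
            .setDryRun(true) // compute the resulting routing but do not apply it
            .execute().actionGet().getState();
    assertThat(dryRun.routingNodes().unassigned().size(), equalTo(1));    // simulated allocation
    ClusterState published = client().admin().cluster().prepareState().execute().actionGet().getState();
    assertThat(published.routingNodes().unassigned().size(), equalTo(2)); // published state untouched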
diff --git a/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java b/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java
new file mode 100644
index 0000000..448276c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class FilteringAllocationTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(FilteringAllocationTests.class);
+
+ @Test
+ public void testDecommissionNodeNoReplicas() throws Exception {
+ logger.info("--> starting 2 nodes");
+ final String node_0 = cluster().startNode();
+ final String node_1 = cluster().startNode();
+ assertThat(cluster().size(), equalTo(2));
+
+ logger.info("--> creating an index with no replicas");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_replicas", 0))
+ .execute().actionGet();
+ ensureGreen();
+ logger.info("--> index some data");
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+        assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100L));
+
+ logger.info("--> decommission the second node");
+ client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._name", node_1))
+ .execute().actionGet();
+ waitForRelocation();
+
+ logger.info("--> verify all are allocated on node1 now");
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ assertThat(clusterState.nodes().get(shardRouting.currentNodeId()).name(), equalTo(node_0));
+ }
+ }
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+        assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100L));
+ }
+
+ @Test
+ public void testDisablingAllocationFiltering() throws Exception {
+ logger.info("--> starting 2 nodes");
+ final String node_0 = cluster().startNode();
+ final String node_1 = cluster().startNode();
+ assertThat(cluster().size(), equalTo(2));
+
+ logger.info("--> creating an index with no replicas");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_replicas", 0))
+ .execute().actionGet();
+
+ ensureGreen();
+
+ logger.info("--> index some data");
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+        assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100L));
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexRoutingTable indexRoutingTable = clusterState.routingTable().index("test");
+ int numShardsOnNode1 = 0;
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if ("node1".equals(clusterState.nodes().get(shardRouting.currentNodeId()).name())) {
+ numShardsOnNode1++;
+ }
+ }
+ }
+
+ if (numShardsOnNode1 > ThrottlingAllocationDecider.DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES) {
+ client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.node_concurrent_recoveries", numShardsOnNode1)).execute().actionGet();
+            // make sure we can recover all the shards at once, otherwise we might run into a state where one of the shards has not yet started relocating
+            // but we have already fired the request that waits for 0 relocating shards.
+ }
+ logger.info("--> remove index from the first node");
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(settingsBuilder().put("index.routing.allocation.exclude._name", node_0))
+ .execute().actionGet();
+ client().admin().cluster().prepareReroute().get();
+ ensureGreen();
+
+ logger.info("--> verify all shards are allocated on node_1 now");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ indexRoutingTable = clusterState.routingTable().index("test");
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ assertThat(clusterState.nodes().get(shardRouting.currentNodeId()).name(), equalTo(node_1));
+ }
+ }
+
+ logger.info("--> disable allocation filtering ");
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(settingsBuilder().put("index.routing.allocation.exclude._name", ""))
+ .execute().actionGet();
+ client().admin().cluster().prepareReroute().get();
+ ensureGreen();
+
+ logger.info("--> verify that there are shards allocated on both nodes now");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(clusterState.routingTable().index("test").numberOfNodesShardsAreAllocatedOn(), equalTo(2));
+ }
+}
+
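The two filtering levels exercised above use the same _name exclusion attribute, once cluster-wide and once per index, and an empty value clears the filter again. A short sketch, assuming node_1 holds a node name returned by cluster().startNode():

    // cluster-wide (transient): drain every shard off node_1
    client().admin().cluster().prepareUpdateSettings()
            .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._name", node_1))
            .execute().actionGet();

    // per-index: keep shards of "test" off node_1, then clear the filter with an empty value
    client().admin().indices().prepareUpdateSettings("test")
            .setSettings(settingsBuilder().put("index.routing.allocation.exclude._name", node_1))
            .execute().actionGet();
    client().admin().indices().prepareUpdateSettings("test")
            .setSettings(settingsBuilder().put("index.routing.allocation.exclude._name", ""))
            .execute().actionGet();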
diff --git a/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java b/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java
new file mode 100644
index 0000000..7f537ea
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.allocation;
+
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.EvenShardsCountAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocatorModule;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.instanceOf;
+
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class ShardsAllocatorModuleTests extends ElasticsearchIntegrationTest {
+
+
+    @Test
+    public void testLoadDefaultShardsAllocator() {
+ assertAllocatorInstance(ImmutableSettings.Builder.EMPTY_SETTINGS, BalancedShardsAllocator.class);
+ }
+
+    @Test
+    public void testLoadByShortKeyShardsAllocator() {
+ Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, ShardsAllocatorModule.EVEN_SHARD_COUNT_ALLOCATOR_KEY)
+ .build();
+ assertAllocatorInstance(build, EvenShardsCountAllocator.class);
+ build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, ShardsAllocatorModule.BALANCED_ALLOCATOR_KEY).build();
+ assertAllocatorInstance(build, BalancedShardsAllocator.class);
+ }
+
+    @Test
+    public void testLoadByClassNameShardsAllocator() {
+ Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, "EvenShardsCount").build();
+ assertAllocatorInstance(build, EvenShardsCountAllocator.class);
+
+ build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY,
+ "org.elasticsearch.cluster.routing.allocation.allocator.EvenShardsCountAllocator").build();
+ assertAllocatorInstance(build, EvenShardsCountAllocator.class);
+ }
+
+ private void assertAllocatorInstance(Settings settings, Class<? extends ShardsAllocator> clazz) {
+ while (cluster().size() != 0) {
+ cluster().stopRandomNode();
+ }
+ cluster().startNode(settings);
+ ShardsAllocator instance = cluster().getInstance(ShardsAllocator.class);
+ assertThat(instance, instanceOf(clazz));
+ }
+}
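For quick reference, the three ways the tests above select a ShardsAllocator through settings; values are taken directly from the test class:

    Settings byDefault = ImmutableSettings.Builder.EMPTY_SETTINGS;              // BalancedShardsAllocator
    Settings byShortKey = settingsBuilder()
            .put(ShardsAllocatorModule.TYPE_KEY, ShardsAllocatorModule.EVEN_SHARD_COUNT_ALLOCATOR_KEY)
            .build();                                                           // EvenShardsCountAllocator
    Settings byClassName = settingsBuilder()
            .put(ShardsAllocatorModule.TYPE_KEY,
                    "org.elasticsearch.cluster.routing.allocation.allocator.EvenShardsCountAllocator")
            .build();                                                           // EvenShardsCountAllocator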
diff --git a/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationTests.java b/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationTests.java
new file mode 100644
index 0000000..c886519
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class SimpleAllocationTests extends ElasticsearchIntegrationTest {
+
+ /**
+ * Test for
+ * https://groups.google.com/d/msg/elasticsearch/y-SY_HyoB-8/EZdfNt9VO44J
+ */
+ @Test
+ public void testSaneAllocation() {
+ prepareCreate("test", 3,
+ settingsBuilder().put("index.number_of_shards", 3)
+ .put("index.number_of_replicas", 1))
+ .execute().actionGet();
+ ensureGreen();
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(0));
+ for (RoutingNode node : state.routingNodes()) {
+ if (!node.isEmpty()) {
+ assertThat(node.size(), equalTo(2));
+ }
+ }
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 0)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ assertThat(state.routingNodes().unassigned().size(), equalTo(0));
+ for (RoutingNode node : state.routingNodes()) {
+ if (!node.isEmpty()) {
+ assertThat(node.size(), equalTo(1));
+ }
+ }
+
+ // create another index
+ prepareCreate("test2", 3,
+ settingsBuilder()
+ .put("index.number_of_shards", 3)
+ .put("index.number_of_replicas", 1))
+ .execute()
+ .actionGet();
+ ensureGreen();
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 1)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ assertThat(state.routingNodes().unassigned().size(), equalTo(0));
+ for (RoutingNode node : state.routingNodes()) {
+ if (!node.isEmpty()) {
+ assertThat(node.size(), equalTo(4));
+ }
+ }
+ }
+}
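The expected node sizes in testSaneAllocation follow from simple arithmetic, assuming the 3 data nodes requested via prepareCreate("test", 3, ...):

    int step1 = 3 * (1 + 1);     // 3 shards x (primary + 1 replica) = 6 copies -> 6 / 3 nodes = 2 per node
    int step2 = 3 * (1 + 0);     // replicas reduced to 0            = 3 copies -> 1 per non-empty node
    int step3 = 2 * 3 * (1 + 1); // test + test2, replicas back to 1 = 12 copies -> 12 / 3 nodes = 4 per node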
diff --git a/src/test/java/org/elasticsearch/cluster/metadata/MappingMetaDataParserTests.java b/src/test/java/org/elasticsearch/cluster/metadata/MappingMetaDataParserTests.java
new file mode 100644
index 0000000..18fbb5e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/metadata/MappingMetaDataParserTests.java
@@ -0,0 +1,346 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class MappingMetaDataParserTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testParseIdAlone() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value", "1");
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.idResolved(), equalTo(true));
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.routingResolved(), equalTo(false));
+ assertThat(parseContext.timestamp(), nullValue());
+ assertThat(parseContext.timestampResolved(), equalTo(false));
+ }
+
+ @Test
+ public void testFailIfIdIsNoValue() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startArray("id").value("id").endArray().field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value", "1");
+ try {
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ fail();
+ } catch (MapperParsingException ex) {
+            // expected: the id is provided as an array, not a single value
+ }
+
+ bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("id").field("x", "id").endObject().field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ parseContext = md.createParseContext(null, "routing_value", "1");
+ try {
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ fail();
+ } catch (MapperParsingException ex) {
+            // expected: the id is provided as an object, not a single value
+ }
+ }
+
+ @Test
+ public void testParseRoutingAlone() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext("id", null, "1");
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.idResolved(), equalTo(false));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.routingResolved(), equalTo(true));
+ assertThat(parseContext.timestamp(), nullValue());
+ assertThat(parseContext.timestampResolved(), equalTo(false));
+ }
+
+ @Test
+ public void testParseTimestampAlone() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext("id", "routing_value1", null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.idResolved(), equalTo(false));
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.routingResolved(), equalTo(false));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ assertThat(parseContext.timestampResolved(), equalTo(true));
+ }
+
+ @Test
+ public void testParseIdAndRoutingAndTimestamp() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+ @Test
+ public void testParseIdAndRoutingAndTimestampWithPath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").field("routing", "routing_value").endObject()
+ .startObject("obj2").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+ @Test
+ public void testParseIdWithPath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").field("routing", "routing_value").endObject()
+ .startObject("obj2").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value", "2");
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.idResolved(), equalTo(true));
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.routingResolved(), equalTo(false));
+ assertThat(parseContext.timestamp(), nullValue());
+ assertThat(parseContext.timestampResolved(), equalTo(false));
+ }
+
+ @Test
+ public void testParseRoutingWithPath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").field("routing", "routing_value").endObject()
+ .startObject("obj2").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext("id", null, "2");
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.idResolved(), equalTo(false));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.routingResolved(), equalTo(true));
+ assertThat(parseContext.timestamp(), nullValue());
+ assertThat(parseContext.timestampResolved(), equalTo(false));
+ }
+
+ @Test
+ public void testParseTimestampWithPath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("routing", "routing_value").endObject()
+ .startObject("obj2").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value1", null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.idResolved(), equalTo(false));
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.routingResolved(), equalTo(false));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ assertThat(parseContext.timestampResolved(), equalTo(true));
+ }
+
+ @Test
+ public void testParseIdAndRoutingAndTimestampWithinSamePath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj1.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject()
+ .startObject("obj2").field("field1", "value1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+ @Test
+ public void testParseIdAndRoutingAndTimestampWithinSamePathAndMoreLevels() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.obj0.id"),
+ new MappingMetaData.Routing(true, "obj1.obj2.routing"),
+ new MappingMetaData.Timestamp(true, "obj1.obj3.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1")
+ .startObject("obj0")
+ .field("id", "id")
+ .endObject()
+ .startObject("obj2")
+ .field("routing", "routing_value")
+ .endObject()
+ .startObject("obj3")
+ .field("timestamp", "1")
+ .endObject()
+ .endObject()
+ .startObject("obj2").field("field1", "value1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+
+ @Test
+ public void testParseIdAndRoutingAndTimestampWithSameRepeatedObject() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj1.timestamp", "dateOptionalTime"), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").endObject()
+ .startObject("obj1").field("routing", "routing_value").endObject()
+ .startObject("obj1").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+ @Test
+ public void testParseIdRoutingTimestampWithRepeatedField() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("field1"),
+ new MappingMetaData.Routing(true, "field1.field1"),
+ new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime"), false);
+
+ byte[] bytes = jsonBuilder().startObject()
+ .field("aaa", "wr")
+ .array("arr1", "1", "2", "3")
+ .field("field1", "foo")
+ .field("field1", "bar")
+ .field("test", "value")
+ .field("zzz", "wr")
+ .endObject().bytes().toBytes();
+
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
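+ // id and timestamp both resolve from the first "field1" value ("foo"); the routing
+ // path "field1.field1" never matches because the repeated "field1" fields are plain strings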
+ assertThat(parseContext.id(), equalTo("foo"));
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.timestamp(), equalTo("foo"));
+ }
+
+ @Test
+ public void testParseNoIdRoutingWithRepeatedFieldAndObject() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "field1.field1.field2"),
+ new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime"), false);
+
+ byte[] bytes = jsonBuilder().startObject()
+ .field("aaa", "wr")
+ .array("arr1", "1", "2", "3")
+ .field("field1", "foo")
+ .startObject("field1").field("field2", "bar").endObject()
+ .field("test", "value")
+ .field("zzz", "wr")
+ .endObject().bytes().toBytes();
+
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
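+ // no top-level "id" field exists, and "field1.field1.field2" matches nothing (only "field1.field2" is present), so both stay null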
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.timestamp(), equalTo("foo"));
+ }
+
+ @Test
+ public void testParseRoutingWithRepeatedFieldAndValidRouting() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
+ new MappingMetaData.Id(null),
+ new MappingMetaData.Routing(true, "field1.field2"),
+ new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime"), false);
+
+ byte[] bytes = jsonBuilder().startObject()
+ .field("aaa", "wr")
+ .array("arr1", "1", "2", "3")
+ .field("field1", "foo")
+ .startObject("field1").field("field2", "bar").endObject()
+ .field("test", "value")
+ .field("zzz", "wr")
+ .endObject().bytes().toBytes();
+
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
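+ // the routing path "field1.field2" resolves through the object form of "field1", not the repeated string value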
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.routing(), equalTo("bar"));
+ assertThat(parseContext.timestamp(), equalTo("foo"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
new file mode 100644
index 0000000..8ff8c26
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
@@ -0,0 +1,485 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
+import static org.hamcrest.Matchers.emptyArray;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class MetaDataTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testIndexOptions_strict() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("foo"))
+ .put(indexBuilder("foobar"))
+ .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz")));
+ MetaData md = mdBuilder.build();
+
+ IndicesOptions options = IndicesOptions.strict();
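+ // strict: unknown concrete indices throw IndexMissingException, while non-matching wildcards resolve to an empty result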
+
+ String[] results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertEquals(3, results.length);
+
+ results = md.concreteIndices(new String[]{"foo"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+
+ try {
+ md.concreteIndices(new String[]{"bar"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+
+ results = md.concreteIndices(new String[]{"foofoo", "foobar"}, options);
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
+
+ try {
+ md.concreteIndices(new String[]{"foo", "bar"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+
+ results = md.concreteIndices(new String[]{"barbaz", "foobar"}, options);
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
+
+ try {
+ md.concreteIndices(new String[]{"barbaz", "bar"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+
+ results = md.concreteIndices(new String[]{"baz*"}, options);
+ assertThat(results, emptyArray());
+
+ results = md.concreteIndices(new String[]{"foo", "baz*"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+ }
+
+ @Test
+ public void testIndexOptions_lenient() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("foo"))
+ .put(indexBuilder("foobar"))
+ .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz")));
+ MetaData md = mdBuilder.build();
+
+ IndicesOptions options = IndicesOptions.lenient();
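+ // lenient: unknown concrete indices are silently dropped instead of throwing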
+
+ String[] results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertEquals(3, results.length);
+
+ results = md.concreteIndices(new String[]{"foo"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+
+ results = md.concreteIndices(new String[]{"bar"}, options);
+ assertThat(results, emptyArray());
+
+ results = md.concreteIndices(new String[]{"foofoo", "foobar"}, options);
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
+
+ results = md.concreteIndices(new String[]{"foo", "bar"}, options);
+ assertEquals(1, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foo"));
+
+ results = md.concreteIndices(new String[]{"barbaz", "foobar"}, options);
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
+
+ results = md.concreteIndices(new String[]{"barbaz", "bar"}, options);
+ assertEquals(1, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo"));
+
+ results = md.concreteIndices(new String[]{"baz*"}, options);
+ assertThat(results, emptyArray());
+
+ results = md.concreteIndices(new String[]{"foo", "baz*"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+ }
+
+ @Test
+ public void testIndexOptions_allowUnavailableExpandOpenDisAllowEmpty() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("foo"))
+ .put(indexBuilder("foobar"))
+ .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz")));
+ MetaData md = mdBuilder.build();
+
+ IndicesOptions options = IndicesOptions.fromOptions(true, false, true, false);
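+ // per the test name: allow unavailable indices, disallow an empty (no-indices) result, expand wildcards to open indices only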
+
+ String[] results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertEquals(3, results.length);
+
+ results = md.concreteIndices(new String[]{"foo"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+
+ results = md.concreteIndices(new String[]{"bar"}, options);
+ assertThat(results, emptyArray());
+
+ try {
+ md.concreteIndices(new String[]{"baz*"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+
+ try {
+ md.concreteIndices(new String[]{"foo", "baz*"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+ }
+
+ @Test
+ public void testIndexOptions_wildcardExpansion() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("foo").state(IndexMetaData.State.CLOSE))
+ .put(indexBuilder("bar"))
+ .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("barbaz")));
+ MetaData md = mdBuilder.build();
+
+ // Only closed
+ IndicesOptions options = IndicesOptions.fromOptions(false, true, false, true);
+ String[] results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+
+ results = md.concreteIndices(new String[]{"foo*"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+
+ // no wildcards, so wildcard expansion doesn't apply
+ results = md.concreteIndices(new String[]{"bar"}, options);
+ assertEquals(1, results.length);
+ assertEquals("bar", results[0]);
+
+ // Only open
+ options = IndicesOptions.fromOptions(false, true, true, false);
+ results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("bar", "foobar"));
+
+ results = md.concreteIndices(new String[]{"foo*"}, options);
+ assertEquals(1, results.length);
+ assertEquals("foobar", results[0]);
+
+ results = md.concreteIndices(new String[]{"bar"}, options);
+ assertEquals(1, results.length);
+ assertEquals("bar", results[0]);
+
+ // Open and closed
+ options = IndicesOptions.fromOptions(false, true, true, true);
+ results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertEquals(3, results.length);
+ assertThat(results, arrayContainingInAnyOrder("bar", "foobar", "foo"));
+
+ results = md.concreteIndices(new String[]{"foo*"}, options);
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foobar", "foo"));
+
+ results = md.concreteIndices(new String[]{"bar"}, options);
+ assertEquals(1, results.length);
+ assertEquals("bar", results[0]);
+
+ results = md.concreteIndices(new String[]{"-foo*"}, options);
+ assertEquals(1, results.length);
+ assertEquals("bar", results[0]);
+
+ results = md.concreteIndices(new String[]{"-*"}, options);
+ assertEquals(0, results.length);
+
+ options = IndicesOptions.fromOptions(false, false, true, true);
+ try {
+ md.concreteIndices(new String[]{"-*"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+ }
+
+ @Test
+ public void testIndexOptions_emptyCluster() {
+ MetaData md = MetaData.builder().build();
+ IndicesOptions options = IndicesOptions.strict();
+
+ String[] results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertThat(results, emptyArray());
+ try {
+ md.concreteIndices(new String[]{"foo"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+ results = md.concreteIndices(new String[]{"foo*"}, options);
+ assertThat(results, emptyArray());
+ try {
+ md.concreteIndices(new String[]{"foo*", "bar"}, options);
+ fail();
+ } catch (IndexMissingException e) {}
+
+ options = IndicesOptions.lenient();
+ results = md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ assertThat(results, emptyArray());
+ results = md.concreteIndices(new String[]{"foo"}, options);
+ assertThat(results, emptyArray());
+ results = md.concreteIndices(new String[]{"foo*"}, options);
+ assertThat(results, emptyArray());
+ results = md.concreteIndices(new String[]{"foo*", "bar"}, options);
+ assertThat(results, emptyArray());
+
+ options = IndicesOptions.fromOptions(true, false, true, false);
+ try {
+ md.concreteIndices(Strings.EMPTY_ARRAY, options);
+ fail();
+ } catch (IndexMissingException e) {}
+ }
+
+ @Test
+ public void convertWildcardsJustIndicesTests() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("testXXX"))
+ .put(indexBuilder("testXYY"))
+ .put(indexBuilder("testYYY"))
+ .put(indexBuilder("kuku"));
+ MetaData md = mdBuilder.build();
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"testXXX"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"testXXX", "testYYY"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testYYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"testXXX", "ku*"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "kuku")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"test*"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*", "kuku"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY", "kuku")));
+ }
+
+ @Test
+ public void convertWildcardsTests() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("testXXX").putAlias(AliasMetaData.builder("alias1")).putAlias(AliasMetaData.builder("alias2")))
+ .put(indexBuilder("testXYY").putAlias(AliasMetaData.builder("alias2")))
+ .put(indexBuilder("testYYY").putAlias(AliasMetaData.builder("alias3")))
+ .put(indexBuilder("kuku"));
+ MetaData md = mdBuilder.build();
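+ // "+" adds names to the expanded set and "-" removes them; a leading "-" starts from the full index set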
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"testYY*", "alias*"}, IndicesOptions.lenient())), equalTo(newHashSet("alias1", "alias2", "alias3", "testYYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"-kuku"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"+test*", "-testYYY"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"+testX*", "+testYYY"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+ assertThat(newHashSet(md.convertFromWildcards(new String[]{"+testYYY", "+testX*"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+ }
+
+ private IndexMetaData.Builder indexBuilder(String index) {
+ return IndexMetaData.builder(index).settings(ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0));
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void concreteIndicesIgnoreIndicesOneMissingIndex() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("testXXX"))
+ .put(indexBuilder("kuku"));
+ MetaData md = mdBuilder.build();
+ md.concreteIndices(new String[]{"testZZZ"}, IndicesOptions.strict());
+ }
+
+ @Test
+ public void concreteIndicesIgnoreIndicesOneMissingIndexOtherFound() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("testXXX"))
+ .put(indexBuilder("kuku"));
+ MetaData md = mdBuilder.build();
+ assertThat(newHashSet(md.concreteIndices(new String[]{"testXXX", "testZZZ"}, IndicesOptions.lenient())), equalTo(newHashSet("testXXX")));
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void concreteIndicesIgnoreIndicesAllMissing() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("testXXX"))
+ .put(indexBuilder("kuku"));
+ MetaData md = mdBuilder.build();
+ assertThat(newHashSet(md.concreteIndices(new String[]{"testMo", "testMahdy"}, IndicesOptions.strict())), equalTo(newHashSet("testXXX")));
+ }
+
+ @Test
+ public void concreteIndicesIgnoreIndicesEmptyRequest() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("testXXX"))
+ .put(indexBuilder("kuku"));
+ MetaData md = mdBuilder.build();
+ assertThat(newHashSet(md.concreteIndices(new String[]{}, IndicesOptions.lenient())), equalTo(Sets.<String>newHashSet("kuku", "testXXX")));
+ }
+
+ @Test
+ public void testIsAllIndices_null() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isAllIndices(null), equalTo(true));
+ }
+
+ @Test
+ public void testIsAllIndices_empty() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isAllIndices(new String[0]), equalTo(true));
+ }
+
+ @Test
+ public void testIsAllIndices_explicitAll() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isAllIndices(new String[]{"_all"}), equalTo(true));
+ }
+
+ @Test
+ public void testIsAllIndices_explicitAllPlusOther() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isAllIndices(new String[]{"_all", "other"}), equalTo(false));
+ }
+
+ @Test
+ public void testIsAllIndices_normalIndexes() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isAllIndices(new String[]{"index1", "index2", "index3"}), equalTo(false));
+ }
+
+ @Test
+ public void testIsAllIndices_wildcard() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isAllIndices(new String[]{"*"}), equalTo(false));
+ }
+
+ @Test
+ public void testIsExplicitAllIndices_null() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isExplicitAllPattern(null), equalTo(false));
+ }
+
+ @Test
+ public void testIsExplicitAllIndices_empty() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isExplicitAllPattern(new String[0]), equalTo(false));
+ }
+
+ @Test
+ public void testIsExplicitAllIndices_explicitAll() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isExplicitAllPattern(new String[]{"_all"}), equalTo(true));
+ }
+
+ @Test
+ public void testIsExplicitAllIndices_explicitAllPlusOther() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isExplicitAllPattern(new String[]{"_all", "other"}), equalTo(false));
+ }
+
+ @Test
+ public void testIsExplicitAllIndices_normalIndexes() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isExplicitAllPattern(new String[]{"index1", "index2", "index3"}), equalTo(false));
+ }
+
+ @Test
+ public void testIsExplicitAllIndices_wildcard() throws Exception {
+ MetaData metaData = MetaData.builder().build();
+ assertThat(metaData.isExplicitAllPattern(new String[]{"*"}), equalTo(false));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_explicitList() throws Exception {
+ // even though it identifies all indices, it's an explicit list of them rather than a pattern
+ String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+ String[] indicesOrAliases = concreteIndices;
+ String[] allConcreteIndices = concreteIndices;
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_onlyWildcard() throws Exception {
+ String[] indicesOrAliases = new String[]{"*"};
+ String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+ String[] allConcreteIndices = concreteIndices;
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_matchingTrailingWildcard() throws Exception {
+ String[] indicesOrAliases = new String[]{"index*"};
+ String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+ String[] allConcreteIndices = concreteIndices;
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_nonMatchingTrailingWildcard() throws Exception {
+ String[] indicesOrAliases = new String[]{"index*"};
+ String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+ String[] allConcreteIndices = new String[]{"index1", "index2", "index3", "a", "b"};
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_matchingSingleExclusion() throws Exception {
+ String[] indicesOrAliases = new String[]{"-index1", "+index1"};
+ String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+ String[] allConcreteIndices = concreteIndices;
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
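+ // excluding index1 and then re-adding it restores the full set, so the pattern still covers all indices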
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_nonMatchingSingleExclusion() throws Exception {
+ String[] indicesOrAliases = new String[]{"-index1"};
+ String[] concreteIndices = new String[]{"index2", "index3"};
+ String[] allConcreteIndices = new String[]{"index1", "index2", "index3"};
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_matchingTrailingWildcardAndExclusion() throws Exception {
+ String[] indicesOrAliases = new String[]{"index*", "-index1", "+index1"};
+ String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+ String[] allConcreteIndices = concreteIndices;
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true));
+ }
+
+ @Test
+ public void testIsPatternMatchingAllIndices_nonMatchingTrailingWildcardAndExclusion() throws Exception {
+ String[] indicesOrAliases = new String[]{"index*", "-index1"};
+ String[] concreteIndices = new String[]{"index2", "index3"};
+ String[] allConcreteIndices = new String[]{"index1", "index2", "index3"};
+ MetaData metaData = metaDataBuilder(allConcreteIndices);
+ assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false));
+ }
+
+ private MetaData metaDataBuilder(String... indices) {
+ MetaData.Builder mdBuilder = MetaData.builder();
+ for (String concreteIndex : indices) {
+ mdBuilder.put(indexBuilder(concreteIndex));
+ }
+ return mdBuilder.build();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java b/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java
new file mode 100644
index 0000000..013c765
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleJsonFromAndTo() throws IOException {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1")
+ .numberOfShards(1)
+ .numberOfReplicas(2))
+ .put(IndexMetaData.builder("test2")
+ .settings(settingsBuilder().put("setting1", "value1").put("setting2", "value2"))
+ .numberOfShards(2)
+ .numberOfReplicas(3))
+ .put(IndexMetaData.builder("test3")
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1))
+ .put(IndexMetaData.builder("test4")
+ .settings(settingsBuilder().put("setting1", "value1").put("setting2", "value2"))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2))
+ .put(IndexMetaData.builder("test5")
+ .settings(settingsBuilder().put("setting1", "value1").put("setting2", "value2"))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2)
+ .putAlias(newAliasMetaDataBuilder("alias1"))
+ .putAlias(newAliasMetaDataBuilder("alias2")))
+ .put(IndexMetaData.builder("test6")
+ .settings(settingsBuilder()
+ .put("setting1", "value1")
+ .put("setting2", "value2")
+ .put("index.aliases.0", "alias3")
+ .put("index.aliases.1", "alias1"))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2)
+ .putAlias(newAliasMetaDataBuilder("alias1"))
+ .putAlias(newAliasMetaDataBuilder("alias2")))
+ .put(IndexMetaData.builder("test7")
+ .settings(settingsBuilder()
+ .put("setting1", "value1")
+ .put("setting2", "value2")
+ .put("index.aliases.0", "alias3")
+ .put("index.aliases.1", "alias1"))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2)
+ .putAlias(newAliasMetaDataBuilder("alias1").filter(ALIAS_FILTER1))
+ .putAlias(newAliasMetaDataBuilder("alias2"))
+ .putAlias(newAliasMetaDataBuilder("alias4").filter(ALIAS_FILTER2)))
+ .put(IndexTemplateMetaData.builder("foo")
+ .template("bar")
+ .order(1).settings(settingsBuilder()
+ .put("setting1", "value1")
+ .put("setting2", "value2")))
+ .build();
+
+ String metaDataSource = MetaData.Builder.toXContent(metaData);
+
+ MetaData parsedMetaData = MetaData.Builder.fromXContent(XContentFactory.xContent(XContentType.JSON).createParser(metaDataSource));
+
+ IndexMetaData indexMetaData = parsedMetaData.index("test1");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().size(), equalTo(0));
+
+ indexMetaData = parsedMetaData.index("test2");
+ assertThat(indexMetaData.numberOfShards(), equalTo(2));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(3));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(0));
+
+ indexMetaData = parsedMetaData.index("test3");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().size(), equalTo(1));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+
+ indexMetaData = parsedMetaData.index("test4");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+
+ indexMetaData = parsedMetaData.index("test5");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+ assertThat(indexMetaData.aliases().size(), equalTo(2));
+ assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
+ assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
+
+ indexMetaData = parsedMetaData.index("test6");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+ assertThat(indexMetaData.aliases().size(), equalTo(3));
+ assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
+ assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
+ assertThat(indexMetaData.aliases().get("alias3").alias(), equalTo("alias3"));
+
+ indexMetaData = parsedMetaData.index("test7");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+ assertThat(indexMetaData.aliases().size(), equalTo(4));
+ assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
+ assertThat(indexMetaData.aliases().get("alias1").filter().string(), equalTo(ALIAS_FILTER1));
+ assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
+ assertThat(indexMetaData.aliases().get("alias2").filter(), nullValue());
+ assertThat(indexMetaData.aliases().get("alias3").alias(), equalTo("alias3"));
+ assertThat(indexMetaData.aliases().get("alias3").filter(), nullValue());
+ assertThat(indexMetaData.aliases().get("alias4").alias(), equalTo("alias4"));
+ assertThat(indexMetaData.aliases().get("alias4").filter().string(), equalTo(ALIAS_FILTER2));
+
+ // templates
+ assertThat(parsedMetaData.templates().get("foo").name(), is("foo"));
+ assertThat(parsedMetaData.templates().get("foo").template(), is("bar"));
+ assertThat(parsedMetaData.templates().get("foo").settings().get("index.setting1"), is("value1"));
+ assertThat(parsedMetaData.templates().get("foo").settings().getByPrefix("index.").get("setting2"), is("value2"));
+ }
+
+ private static final String MAPPING_SOURCE1 = "{\"mapping1\":{\"text1\":{\"type\":\"string\"}}}";
+ private static final String MAPPING_SOURCE2 = "{\"mapping2\":{\"text2\":{\"type\":\"string\"}}}";
+ private static final String ALIAS_FILTER1 = "{\"field1\":\"value1\"}";
+ private static final String ALIAS_FILTER2 = "{\"field2\":\"value2\"}";
+}
diff --git a/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java b/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java
new file mode 100644
index 0000000..8b6ebff
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.node;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
+import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class DiscoveryNodeFiltersTests extends ElasticsearchTestCase {
+
+ @Test
+ public void nameMatch() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("xxx.name", "name1")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+ }
+
+ @Test
+ public void idMatch() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("xxx._id", "id1")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+ }
+
+ @Test
+ public void idOrNameMatch() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("xxx._id", "id1,blah")
+ .put("xxx.name", "blah,name2")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
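+ // OR semantics with comma-separated values: matching any listed id or name is enough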
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name3", "id3", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+ }
+
+ @Test
+ public void tagAndGroupMatch() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("xxx.tag", "A")
+ .put("xxx.group", "B")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(AND, "xxx.", settings);
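+ // AND semantics: a node must satisfy every filter to match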
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE,
+ ImmutableMap.<String, String>of("tag", "A", "group", "B"), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE,
+ ImmutableMap.<String, String>of("tag", "A", "group", "B", "name", "X"), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name3", "id3", DummyTransportAddress.INSTANCE,
+ ImmutableMap.<String, String>of("tag", "A", "group", "F", "name", "X"), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+
+ node = new DiscoveryNode("name4", "id4", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+ }
+
+ @Test
+ public void starMatch() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("xxx.name", "*")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
new file mode 100644
index 0000000..9563cd0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
@@ -0,0 +1,435 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.Lists;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+public class AddIncrementallyTests extends ElasticsearchAllocationTestCase {
+ private final ESLogger logger = Loggers.getLogger(AddIncrementallyTests.class);
+
+ @Test
+ public void testAddNodesAndIndices() {
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ AllocationService service = createAllocationService(settings.build());
+
+ ClusterState clusterState = initCluster(service, 1, 3, 3, 1);
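+ // 1 node, 3 indices, 3 shards, 1 replica: all 9 primaries start on node0 and the 9 replicas stay unassigned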
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(9));
+ int nodeOffset = 1;
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(3));
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ assertAtLeastOneIndexShardPerNode(clusterState);
+ clusterState = removeNodes(clusterState, service, 1);
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+
+ clusterState = addIndex(clusterState, service, 3, 2, 3);
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(2));
+ assertNumIndexShardsPerNode(clusterState, "test3", Matchers.equalTo(2));
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+
+ clusterState = addIndex(clusterState, service, 4, 2, 3);
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(4));
+ assertNumIndexShardsPerNode(clusterState, "test4", Matchers.equalTo(2));
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ clusterState = removeNodes(clusterState, service, 1);
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(4));
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ }
+
+ @Test
+ public void testMinimalRelocations() {
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString())
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 2);
+ AllocationService service = createAllocationService(settings.build());
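+ // node_concurrent_recoveries=2 caps recoveries, so shards relocate to the new node two at a time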
+
+ ClusterState clusterState = initCluster(service, 1, 3, 3, 1);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(9));
+ int nodeOffset = 1;
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(3));
+
+ logger.info("now, start one more node, check that rebalancing will happen because we set it to always");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+ nodes.put(newNode("node2"));
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+
+ RoutingTable routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+
+ RoutingTable prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(4));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(6));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prev, Matchers.sameInstance(routingTable));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ }
+
+ @Test
+ public void testMinimalRelocationsNoLimit() {
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString())
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 100)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100);
+ AllocationService service = createAllocationService(settings.build());
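+ // recovery limits are effectively removed, yet the balancer still relocates only two shards per round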
+
+ ClusterState clusterState = initCluster(service, 1, 3, 3, 1);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(9));
+ int nodeOffset = 1;
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(3));
+
+ logger.info("now, start one more node, check that rebalancing will happen because we set it to always");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+ nodes.put(newNode("node2"));
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+
+ RoutingTable routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+
+ RoutingTable prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(4));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(6));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prev, Matchers.sameInstance(routingTable));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ }
+
+ private void assertNumIndexShardsPerNode(ClusterState state, Matcher<Integer> matcher) {
+ for (String index : state.routingTable().indicesRouting().keySet()) {
+ assertNumIndexShardsPerNode(state, index, matcher);
+ }
+ }
+
+ private void assertNumIndexShardsPerNode(ClusterState state, String index, Matcher<Integer> matcher) {
+ for (RoutingNode node : state.routingNodes()) {
+ assertThat(node.shardsWithState(index, STARTED).size(), matcher);
+ }
+ }
+
+ private void assertAtLeastOneIndexShardPerNode(ClusterState state) {
+ for (String index : state.routingTable().indicesRouting().keySet()) {
+
+ for (RoutingNode node : state.routingNodes()) {
+ assertThat(node.shardsWithState(index, STARTED).size(), Matchers.greaterThanOrEqualTo(1));
+ }
+ }
+
+ }
+
+ private ClusterState addNodes(ClusterState clusterState, AllocationService service, int numNodes, int nodeOffset) {
+ logger.info("now, start [{}] more node, check that rebalancing will happen because we set it to always", numNodes);
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+ for (int i = 0; i < numNodes; i++) {
+ nodes.put(newNode("node" + (i + nodeOffset)));
+ }
+
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+
+ RoutingTable routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ // move initializing to started
+
+ RoutingTable prev = routingTable;
+ while (true) {
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
+ private ClusterState initCluster(AllocationService service, int numberOfNodes, int numberOfIndices, int numberOfShards,
+ int numberOfReplicas) {
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ IndexMetaData.Builder index = IndexMetaData.builder("test" + i).numberOfShards(numberOfShards).numberOfReplicas(
+ numberOfReplicas);
+ metaDataBuilder = metaDataBuilder.put(index);
+ }
+
+ MetaData metaData = metaDataBuilder.build();
+
+ for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
+ routingTableBuilder.addAsNew(cursor.value);
+ }
+
+ RoutingTable routingTable = routingTableBuilder.build();
+
+ logger.info("start " + numberOfNodes + " nodes");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
+ for (int i = 0; i < numberOfNodes; i++) {
+ nodes.put(newNode("node" + i));
+ }
+ ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+ routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("restart all the primary shards, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ while (true) {
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
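+ /**
+ * Adds one more index ("test" + indexOrdinal) to the cluster, then starts its primaries
+ * and replicas and rebalances until the routing table converges.
+ */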
+ private ClusterState addIndex(ClusterState clusterState, AllocationService service, int indexOrdinal, int numberOfShards,
+ int numberOfReplicas) {
+ MetaData.Builder metaDataBuilder = MetaData.builder(clusterState.getMetaData());
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable());
+
+ IndexMetaData.Builder index = IndexMetaData.builder("test" + indexOrdinal).numberOfShards(numberOfShards).numberOfReplicas(
+ numberOfReplicas);
+ IndexMetaData imd = index.build();
+ metaDataBuilder = metaDataBuilder.put(imd, true);
+ routingTableBuilder.addAsNew(imd);
+
+ MetaData metaData = metaDataBuilder.build();
+ RoutingTable routingTable = routingTableBuilder.build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+ routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("restart all the primary shards, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ while (true) {
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
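+ /**
+ * Removes {@code numNodes} randomly chosen nodes from the cluster, then starts the
+ * re-initializing shards and rebalances until the routing table converges.
+ */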
+ private ClusterState removeNodes(ClusterState clusterState, AllocationService service, int numNodes) {
+ logger.info("Removing [{}] nodes", numNodes);
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+ ArrayList<DiscoveryNode> discoveryNodes = Lists.newArrayList(clusterState.nodes());
+ Collections.shuffle(discoveryNodes, getRandom());
+ for (DiscoveryNode node : discoveryNodes) {
+ nodes.remove(node.id());
+ numNodes--;
+ if (numNodes <= 0) {
+ break;
+ }
+ }
+
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingTable routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("rebalancing");
+ routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ while (true) {
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocatePostApiFlagTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocatePostApiFlagTests.java
new file mode 100644
index 0000000..a749535
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocatePostApiFlagTests.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class AllocatePostApiFlagTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(AllocatePostApiFlagTests.class);
+
+ @Test
+ public void simpleFlagTests() {
+ AllocationService allocation = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("creating an index with 1 shard, no replica");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(), equalTo(false));
+
+ logger.info("adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(), equalTo(false));
+
+ logger.info("start primary shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
new file mode 100644
index 0000000..2e76b20
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
@@ -0,0 +1,392 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.command.AllocateAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
+import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class AllocationCommandsTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(AllocationCommandsTests.class);
+
+ @Test
+ public void moveShardCommand() {
+ AllocationService allocation = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("creating an index with 1 shard, no replica");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("start primary shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("move the shard");
+ String existingNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String toNodeId;
+ if ("node1".equals(existingNodeId)) {
+ toNodeId = "node2";
+ } else {
+ toNodeId = "node1";
+ }
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(new ShardId("test", 0), existingNodeId, toNodeId)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node(existingNodeId).get(0).state(), equalTo(ShardRoutingState.RELOCATING));
+ assertThat(clusterState.routingNodes().node(toNodeId).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ logger.info("finish moving the shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node(existingNodeId).isEmpty(), equalTo(true));
+ assertThat(clusterState.routingNodes().node(toNodeId).get(0).state(), equalTo(ShardRoutingState.STARTED));
+ }
+
+ @Test
+ public void allocateCommand() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 3 nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ .put(newNode("node3"))
+ .put(newNode("node4", ImmutableMap.of("data", Boolean.FALSE.toString())))
+ ).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> allocating with primary flag set to false, should fail");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) { // expected: allocating a primary with allow_primary=false fails
+ }
+
+ logger.info("--> allocating to non-data node, should fail");
+ try {
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node4", true)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) { // expected: cannot allocate to a non-data node
+ }
+
+ logger.info("--> allocating with primary flag set to true");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", true)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+
+ logger.info("--> start the primary shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+
+ logger.info("--> allocate the replica shard on the primary shard node, should fail");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) { // expected: replica cannot go on the same node as the primary
+ }
+
+ logger.info("--> allocate the replica shard on on the second node");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
+
+
+ logger.info("--> start the replica shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
+
+ logger.info("--> verify that we fail when there are no unassigned shards");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node3", false)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) { // expected: no unassigned shards left to allocate
+ }
+ }
+
+ @Test
+ public void cancelCommand() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 3 nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ .put(newNode("node3"))
+ ).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> allocating with primary flag set to true");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", true)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+
+ logger.info("--> cancel primary allocation, make sure it fails...");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) { // expected: cancelling a primary requires allow_primary
+ }
+
+ logger.info("--> start the primary shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+
+ logger.info("--> cancel primary allocation, make sure it fails...");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) { // expected: cancelling a primary requires allow_primary
+ }
+
+ logger.info("--> allocate the replica shard on on the second node");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> cancel the relocation allocation");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ logger.info("--> allocate the replica shard on on the second node");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> cancel the primary being replicated, make sure it fails");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) { // expected: the primary is still being replicated
+ }
+
+ logger.info("--> start the replica shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
+
+ logger.info("--> cancel allocation of the replica shard");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ logger.info("--> allocate the replica shard on on the second node");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node2", false)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(rerouteResult.changed(), equalTo(true));
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
+ logger.info("--> start the replica shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
+
+ logger.info("--> move the replica shard");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(new ShardId("test", 0), "node2", "node3")));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node3").shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> cancel the move of the replica shard");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node3", false)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
+
+
+ logger.info("--> cancel the primary allocation (with allow_primary set to true)");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", true)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(rerouteResult.changed(), equalTo(true));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).get(0).primary(), equalTo(true));
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(0));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+ }
+
+ @Test
+ public void serialization() throws Exception {
+ AllocationCommands commands = new AllocationCommands(
+ new AllocateAllocationCommand(new ShardId("test", 1), "node1", true),
+ new MoveAllocationCommand(new ShardId("test", 3), "node2", "node3"),
+ new CancelAllocationCommand(new ShardId("test", 4), "node5", true)
+ );
+ BytesStreamOutput bytes = new BytesStreamOutput();
+ AllocationCommands.writeTo(commands, bytes);
+ AllocationCommands sCommands = AllocationCommands.readFrom(new BytesStreamInput(bytes.bytes()));
+
+ assertThat(sCommands.commands().size(), equalTo(3));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).shardId(), equalTo(new ShardId("test", 1)));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).node(), equalTo("node1"));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).allowPrimary(), equalTo(true));
+
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).shardId(), equalTo(new ShardId("test", 3)));
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).fromNode(), equalTo("node2"));
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).toNode(), equalTo("node3"));
+
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).shardId(), equalTo(new ShardId("test", 4)));
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).node(), equalTo("node5"));
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).allowPrimary(), equalTo(true));
+ }
+
+ @Test
+ public void xContent() throws Exception {
+ String commands = "{\n" +
+ " \"commands\" : [\n" +
+ " {\"allocate\" : {\"index\" : \"test\", \"shard\" : 1, \"node\" : \"node1\", \"allow_primary\" : true}}\n" +
+ " ,{\"move\" : {\"index\" : \"test\", \"shard\" : 3, \"from_node\" : \"node2\", \"to_node\" : \"node3\"}} \n" +
+ " ,{\"cancel\" : {\"index\" : \"test\", \"shard\" : 4, \"node\" : \"node5\", \"allow_primary\" : true}} \n" +
+ " ]\n" +
+ "}\n";
+ XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(commands);
+ // advance the parser two tokens so that it is positioned on the "commands" field
+ parser.nextToken();
+ parser.nextToken();
+ AllocationCommands sCommands = AllocationCommands.fromXContent(parser);
+
+ assertThat(sCommands.commands().size(), equalTo(3));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).shardId(), equalTo(new ShardId("test", 1)));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).node(), equalTo("node1"));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).allowPrimary(), equalTo(true));
+
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).shardId(), equalTo(new ShardId("test", 3)));
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).fromNode(), equalTo("node2"));
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).toNode(), equalTo("node3"));
+
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).shardId(), equalTo(new ShardId("test", 4)));
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).node(), equalTo("node5"));
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).allowPrimary(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
new file mode 100644
index 0000000..929fb54
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
@@ -0,0 +1,827 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class AwarenessAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(AwarenessAllocationTests.class);
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded1() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded1'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, make sure nothing moves");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable, sameInstance(clusterState.routingTable()));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded2() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded2'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node3", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node4"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, make sure nothing moves");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node5", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable, sameInstance(clusterState.routingTable()));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded3() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .put("cluster.routing.allocation.balance.index", 0.0f)
+ .put("cluster.routing.allocation.balance.replica", 1.0f)
+ .put("cluster.routing.allocation.balance.primary", 0.0f)
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded3'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (ShardRouting shard : clusterState.routingNodes().shardsWithState(INITIALIZING)) {
+ logger.info(shard.toString());
+ }
+ for (ShardRouting shard : clusterState.routingNodes().shardsWithState(STARTED)) {
+ logger.info(shard.toString());
+ }
+ for (ShardRouting shard : clusterState.routingNodes().shardsWithState(RELOCATING)) {
+ logger.info(shard.toString());
+ }
+ for (ShardRouting shard : clusterState.routingNodes().shardsWithState(UNASSIGNED)) {
+ logger.info(shard.toString());
+ }
+
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(5));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(5));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(5));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));
+
+ logger.info("--> complete initializing");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> run it again, since we still might have relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, some more relocation should happen");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded4() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded4'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(5).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(10));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));
+
+ logger.info("--> complete initializing");
+ for (int i = 0; i < 2; i++) {
+ logger.info("--> complete initializing round: [{}]", i);
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(10));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(5));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(5));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, some more relocation should happen");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));
+
+ logger.info("--> complete relocation");
+ for (int i = 0; i < 2; i++) {
+ logger.info("--> complete initializing round: [{}]", i);
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(5));
+ assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(5));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(5));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(5));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded5() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded5'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(2))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, we will have another relocation");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node4"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
+
+ logger.info("--> make sure another reroute does not move things");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded6() {
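+        // Same shape as test 5 but with three replicas: four copies start on rack "1", and each
+        // new rack value added later should pull exactly one copy over to it.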
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded6'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(3))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node3", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node4", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node5", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node5"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, we will have another relocation");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node6", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(3));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node6"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> make sure another reroute does not move things");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ @Test
+ public void fullAwareness1() {
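+        // Forced awareness: force.rack_id.values declares racks "1" and "2" up front, so the
+        // replica stays unassigned while only rack "1" is present instead of doubling up on it.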
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'fullAwareness1'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> replica will not start because we have only one rack value");
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, make sure nothing moves");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable, sameInstance(clusterState.routingTable()));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void fullAwareness2() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'fullAwareness2'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node3", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> replica will not start because we have only one rack value");
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node4"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, make sure nothing moves");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node5", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable, sameInstance(clusterState.routingTable()));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void fullAwareness3() {
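+        // Forced awareness on rack_id combined with balance factors that only weigh the total
+        // shard count per node (index 0.0, replica 1.0, primary 0.0); per-index spread and
+        // primary placement should not influence the allocator here.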
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .put("cluster.routing.allocation.balance.index", 0.0f)
+ .put("cluster.routing.allocation.balance.replica", 1.0f)
+ .put("cluster.routing.allocation.balance.primary", 0.0f)
+ .build());
+
+ logger.info("Building initial routing table for 'fullAwareness3'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(5).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(10));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
+
+ logger.info("--> complete initializing");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> run it again, since we still might have relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, some more relocation should happen");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ @Test
+ public void testUnbalancedZones() {
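+        // Forced zone awareness caps each zone at one copy of every shard, so the lone node in
+        // zone "b" should keep all 5 copies for its zone no matter how many nodes join zone "a".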
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.awareness.force.zone.values", "a,b")
+ .put("cluster.routing.allocation.awareness.attributes", "zone")
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ logger.info("Building initial routing table for 'testUnbalancedZones'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("A-0", ImmutableMap.of("zone", "a")))
+ .put(newNode("B-0", ImmutableMap.of("zone", "b")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(0));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logger.info("--> all replicas are allocated and started since we have on node in each zone");
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> add a new node in zone 'a' and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("A-1", ImmutableMap.of("zone", "a")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(8));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(2));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("A-1"));
+ logger.info("--> starting initializing shards on the new node");
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+ assertThat(clusterState.getRoutingNodes().node("A-1").size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("A-0").size(), equalTo(3));
+ assertThat(clusterState.getRoutingNodes().node("B-0").size(), equalTo(5));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
new file mode 100644
index 0000000..a7c29af
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
@@ -0,0 +1,509 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.gateway.none.NoneGatewayAllocator;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+public class BalanceConfigurationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(BalanceConfigurationTests.class);
+ // TODO maybe we can randomize these numbers somehow
+ final int numberOfNodes = 25;
+ final int numberOfIndices = 12;
+ final int numberOfShards = 2;
+ final int numberOfReplicas = 2;
+
+ @Test
+ public void testIndexBalance() {
+ /* Tests balance over indices only */
+ final float indexBalance = 1.0f;
+ final float replicaBalance = 0.0f;
+ final float primaryBalance = 0.0f;
+        final float balanceThreshold = 1.0f;
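+        // Only the index balance factor is non-zero, so the allocator scores how evenly each
+        // individual index is spread across nodes and ignores totals and primary placement.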
+
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance);
+ settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, primaryBalance);
+        settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceThreshold);
+
+ AllocationService strategy = createAllocationService(settings.build());
+
+ ClusterState clusterState = initCluster(strategy);
+        assertIndexBalance(logger, clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+        clusterState = addNode(clusterState, strategy);
+        assertIndexBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+        clusterState = removeNodes(clusterState, strategy);
+        assertIndexBalance(logger, clusterState.getRoutingNodes(), (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+ }
+
+ @Test
+ public void testReplicaBalance() {
+ /* Tests balance over replicas only */
+ final float indexBalance = 0.0f;
+ final float replicaBalance = 1.0f;
+ final float primaryBalance = 0.0f;
+        final float balanceThreshold = 1.0f;
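+        // Only the shard-count factor is active: 12 indices * 2 shards * 3 copies = 72 shards
+        // over 25 nodes averages 2.88 per node, so with threshold 1.0 every node should end up
+        // holding between floor(1.88) = 1 and ceil(3.88) = 4 started shards.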
+
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance);
+ settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, primaryBalance);
+        settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceThreshold);
+
+ AllocationService strategy = createAllocationService(settings.build());
+
+ ClusterState clusterState = initCluster(strategy);
+        assertReplicaBalance(logger, clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+        clusterState = addNode(clusterState, strategy);
+        assertReplicaBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+        clusterState = removeNodes(clusterState, strategy);
+        assertReplicaBalance(logger, clusterState.getRoutingNodes(), (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+ }
+
+ @Test
+ public void testPrimaryBalance() {
+ /* Tests balance over primaries only */
+ final float indexBalance = 0.0f;
+ final float replicaBalance = 0.0f;
+ final float primaryBalance = 1.0f;
+        final float balanceThreshold = 1.0f;
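+        // Only primary placement is scored here; assertPrimaryBalance checks the number of
+        // primaries per node and index against the same floor/ceil window.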
+
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance);
+ settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, primaryBalance);
+        settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceThreshold);
+
+ AllocationService strategy = createAllocationService(settings.build());
+
+        ClusterState clusterState = initCluster(strategy);
+        assertPrimaryBalance(logger, clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+        clusterState = addNode(clusterState, strategy);
+        assertPrimaryBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+
+        clusterState = removeNodes(clusterState, strategy);
+        assertPrimaryBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1 - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold);
+ }
+
+ private ClusterState initCluster(AllocationService strategy) {
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ IndexMetaData.Builder index = IndexMetaData.builder("test" + i).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas);
+ metaDataBuilder = metaDataBuilder.put(index);
+ }
+
+ MetaData metaData = metaDataBuilder.build();
+
+ for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
+ routingTableBuilder.addAsNew(cursor.value);
+ }
+
+ RoutingTable routingTable = routingTableBuilder.build();
+
+ logger.info("start " + numberOfNodes + " nodes");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
+ for (int i = 0; i < numberOfNodes; i++) {
+ nodes.put(newNode("node" + i));
+ }
+ ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("restart all the primary shards, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ while (true) {
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
+ private ClusterState addNode(ClusterState clusterState, AllocationService strategy) {
+ logger.info("now, start 1 more node, check that rebalancing will happen because we set it to always");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node" + numberOfNodes)))
+ .build();
+
+ RoutingTable routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ // move initializing to started
+
+ RoutingTable prev = routingTable;
+ while (true) {
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
+ private ClusterState removeNodes(ClusterState clusterState, AllocationService strategy) {
+ logger.info("Removing half the nodes (" + (numberOfNodes + 1) / 2 + ")");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+
+ for (int i = (numberOfNodes + 1) / 2; i <= numberOfNodes; i++) {
+ nodes.remove("node" + i);
+ }
+
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingTable routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("rebalancing");
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ while (true) {
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
+    private void assertReplicaBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float threshold) {
+        final int numShards = numberOfIndices * numberOfShards * (numberOfReplicas + 1);
+        final float avgNumShards = (float) numShards / (float) numberOfNodes;
+        final int minAvgNumberOfShards = (int) Math.floor(avgNumShards - threshold);
+        final int maxAvgNumberOfShards = (int) Math.ceil(avgNumShards + threshold);
+
+ for (RoutingNode node : nodes) {
+// logger.info(node.nodeId() + ": " + node.shardsWithState(INITIALIZING, STARTED).size() + " shards ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")");
+ assertThat(node.shardsWithState(STARTED).size(), Matchers.greaterThanOrEqualTo(minAvgNumberOfShards));
+ assertThat(node.shardsWithState(STARTED).size(), Matchers.lessThanOrEqualTo(maxAvgNumberOfShards));
+ }
+ }
+
+    private void assertIndexBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float threshold) {
+        final int numShards = numberOfShards * (numberOfReplicas + 1);
+        final float avgNumShards = (float) numShards / (float) numberOfNodes;
+        final int minAvgNumberOfShards = (int) Math.floor(avgNumShards - threshold);
+        final int maxAvgNumberOfShards = (int) Math.ceil(avgNumShards + threshold);
+
+ for (String index : nodes.getRoutingTable().indicesRouting().keySet()) {
+ for (RoutingNode node : nodes) {
+// logger.info(node.nodeId() +":"+index+ ": " + node.shardsWithState(index, INITIALIZING, STARTED).size() + " shards ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")");
+ assertThat(node.shardsWithState(index, STARTED).size(), Matchers.greaterThanOrEqualTo(minAvgNumberOfShards));
+ assertThat(node.shardsWithState(index, STARTED).size(), Matchers.lessThanOrEqualTo(maxAvgNumberOfShards));
+ }
+ }
+ }
+
+    private void assertPrimaryBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float threshold) {
+        final int numShards = numberOfShards;
+        final float avgNumShards = (float) numShards / (float) numberOfNodes;
+        final int minAvgNumberOfShards = (int) Math.floor(avgNumShards - threshold);
+        final int maxAvgNumberOfShards = (int) Math.ceil(avgNumShards + threshold);
+
+ for (String index : nodes.getRoutingTable().indicesRouting().keySet()) {
+ for (RoutingNode node : nodes) {
+ int primaries = 0;
+ for (ShardRouting shard : node.shardsWithState(index, STARTED)) {
+ primaries += shard.primary() ? 1 : 0;
+ }
+// logger.info(node.nodeId() + ": " + primaries + " primaries ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")");
+ assertThat(primaries, Matchers.greaterThanOrEqualTo(minAvgNumberOfShards));
+ assertThat(primaries, Matchers.lessThanOrEqualTo(maxAvgNumberOfShards));
+ }
+ }
+ }
+
+ @Test
+ public void testPersistedSettings() {
+ ImmutableSettings.Builder settings = settingsBuilder();
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.2);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.3);
+ settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, 0.5);
+ settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 2.0);
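+        // Hand-rolled NodeSettingsService that captures the listener the allocator registers,
+        // so the test can push settings updates straight into onRefreshSettings without a cluster.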
+ final NodeSettingsService.Listener[] listeners = new NodeSettingsService.Listener[1];
+ NodeSettingsService service = new NodeSettingsService(settingsBuilder().build()) {
+
+ @Override
+ public void addListener(Listener listener) {
+ assertNull("addListener was called twice while only one time was expected", listeners[0]);
+ listeners[0] = listener;
+ }
+
+ };
+ BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings.build(), service);
+ assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f));
+ assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f));
+ assertThat(allocator.getPrimaryBalance(), Matchers.equalTo(0.5f));
+ assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f));
+
+ settings = settingsBuilder();
+ settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ listeners[0].onRefreshSettings(settings.build());
+ assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f));
+ assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f));
+ assertThat(allocator.getPrimaryBalance(), Matchers.equalTo(0.5f));
+ assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f));
+
+ settings = settingsBuilder();
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.5);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.1);
+ settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, 0.4);
+ settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 3.0);
+ listeners[0].onRefreshSettings(settings.build());
+ assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.5f));
+ assertThat(allocator.getShardBalance(), Matchers.equalTo(0.1f));
+ assertThat(allocator.getPrimaryBalance(), Matchers.equalTo(0.4f));
+ assertThat(allocator.getThreshold(), Matchers.equalTo(3.0f));
+ }
+
+ @Test
+ public void testNoRebalanceOnPrimaryOverload() {
+ ImmutableSettings.Builder settings = settingsBuilder();
+ AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(),
+ new NodeSettingsService(ImmutableSettings.Builder.EMPTY_SETTINGS), getRandom()), new ShardsAllocators(settings.build(),
+ new NoneGatewayAllocator(), new ShardsAllocator() {
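+            // Stub allocator: it never rebalances or moves shards; allocateUnassigned pins every
+            // shard copy to a fixed node to reproduce the layout sketched in the comment below.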
+
+ @Override
+ public boolean rebalance(RoutingAllocation allocation) {
+ return false;
+ }
+
+ @Override
+ public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return false;
+ }
+
+ @Override
+ public void applyStartedShards(StartedRerouteAllocation allocation) {
+            }
+
+ @Override
+ public void applyFailedShards(FailedRerouteAllocation allocation) {
+ }
+
+            /*
+             * This allocator rebuilds the scenario below, in which a rebalance would be
+             * triggered solely by the primary overload on node [1] and a shard would be
+             * rebalanced to node [0]:
+             *
+ routing_nodes:
+ -----node_id[0][V]
+ --------[test][0], node[0], [R], s[STARTED]
+ --------[test][4], node[0], [R], s[STARTED]
+ -----node_id[1][V]
+ --------[test][0], node[1], [P], s[STARTED]
+ --------[test][1], node[1], [P], s[STARTED]
+ --------[test][3], node[1], [R], s[STARTED]
+ -----node_id[2][V]
+ --------[test][1], node[2], [R], s[STARTED]
+ --------[test][2], node[2], [R], s[STARTED]
+ --------[test][4], node[2], [P], s[STARTED]
+ -----node_id[3][V]
+ --------[test][2], node[3], [P], s[STARTED]
+ --------[test][3], node[3], [P], s[STARTED]
+ ---- unassigned
+ */
+ @Override
+ public boolean allocateUnassigned(RoutingAllocation allocation) {
+ RoutingNodes.UnassignedShards unassigned = allocation.routingNodes().unassigned();
+ boolean changed = !unassigned.isEmpty();
+ for (MutableShardRouting sr : unassigned) {
+ switch (sr.id()) {
+ case 0:
+ if (sr.primary()) {
+ allocation.routingNodes().assign(sr, "node1");
+ } else {
+ allocation.routingNodes().assign(sr, "node0");
+ }
+ break;
+ case 1:
+ if (sr.primary()) {
+ allocation.routingNodes().assign(sr, "node1");
+ } else {
+ allocation.routingNodes().assign(sr, "node2");
+ }
+ break;
+ case 2:
+ if (sr.primary()) {
+ allocation.routingNodes().assign(sr, "node3");
+ } else {
+ allocation.routingNodes().assign(sr, "node2");
+ }
+ break;
+ case 3:
+ if (sr.primary()) {
+ allocation.routingNodes().assign(sr, "node3");
+ } else {
+ allocation.routingNodes().assign(sr, "node1");
+ }
+ break;
+ case 4:
+ if (sr.primary()) {
+ allocation.routingNodes().assign(sr, "node2");
+ } else {
+ allocation.routingNodes().assign(sr, "node0");
+ }
+ break;
+ }
+
+ }
+ unassigned.clear();
+ return changed;
+ }
+ }), ClusterInfoService.EMPTY);
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+ IndexMetaData.Builder indexMeta = IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1);
+ metaDataBuilder = metaDataBuilder.put(indexMeta);
+ MetaData metaData = metaDataBuilder.build();
+ for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
+ routingTableBuilder.addAsNew(cursor.value);
+ }
+ RoutingTable routingTable = routingTableBuilder.build();
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
+ for (int i = 0; i < 4; i++) {
+ DiscoveryNode node = newNode("node" + i);
+ nodes.put(node);
+ }
+
+ ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ for (RoutingNode routingNode : routingNodes) {
+ for (MutableShardRouting mutableShardRouting : routingNode) {
+ assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.INITIALIZING));
+ }
+ }
+ strategy = createAllocationService(settings.build());
+
+ logger.info("use the new allocator and check if it moves shards");
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ for (RoutingNode routingNode : routingNodes) {
+ for (MutableShardRouting mutableShardRouting : routingNode) {
+ assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
+ }
+ }
+
+ logger.info("start the replica shards");
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (RoutingNode routingNode : routingNodes) {
+ for (MutableShardRouting mutableShardRouting : routingNode) {
+ assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
+ }
+ }
+
+ logger.info("rebalancing");
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (RoutingNode routingNode : routingNodes) {
+ for (MutableShardRouting mutableShardRouting : routingNode) {
+ assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
+ }
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
new file mode 100644
index 0000000..89b0721
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
@@ -0,0 +1,626 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ClusterRebalanceRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ClusterRebalanceRoutingTests.class);
+
+ @Test
+ public void testAlways() {
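+        // ClusterRebalanceType.ALWAYS allows relocations at any time, even while other shards
+        // are still unassigned or initializing.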
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+// assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing will happen (for test1) because we set it to always");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.node("node3").size(), equalTo(1));
+ assertThat(routingNodes.node("node3").get(0).shardId().index().name(), equalTo("test1"));
+ }
+
+
+ @Test
+ public void testClusterPrimariesActive1() {
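+        // INDICES_PRIMARIES_ACTIVE only allows rebalancing once every primary in the cluster
+        // is active; unassigned or initializing replicas do not block it.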
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test2, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to primaries_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.node("node3").size(), equalTo(1));
+ assertThat(routingNodes.node("node3").get(0).shardId().index().name(), equalTo("test1"));
+ }
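+
+    /*
+     * Editor's sketch (illustrative, not upstream code): the
+     * INDICES_PRIMARIES_ACTIVE type exercised above - and shown blocking in
+     * the next test - only permits rebalancing once every primary in the
+     * cluster is active. The decider's check plausibly reduces to the
+     * following shape (type and method names are assumptions):
+     *
+     *   for (MutableShardRouting shard : allocation.routingNodes().unassigned()) {
+     *       if (shard.primary()) {
+     *           return Decision.NO; // an unassigned primary blocks rebalancing
+     *       }
+     *   }
+     *   for (MutableShardRouting shard : allocation.routingNodes().shardsWithState(INITIALIZING)) {
+     *       if (shard.primary()) {
+     *           return Decision.NO; // an initializing primary blocks rebalancing
+     *       }
+     *   }
+     *   return Decision.YES; // all primaries are active, rebalancing is allowed
+     */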
+
+ @Test
+ public void testClusterPrimariesActive2() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to primaries_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testClusterAllActive1() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test2, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("start the test2 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to all_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.node("node3").size(), equalTo(1));
+ assertThat(routingNodes.node("node3").get(0).shardId().index().name(), anyOf(equalTo("test1"), equalTo("test2")));
+ }
+
+ @Test
+ public void testClusterAllActive2() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testClusterAllActive3() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test2, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
+ }
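+
+    /*
+     * Editor's note (hedged summary, not upstream code): the tests in this
+     * class exercise the values of cluster.routing.allocation.allow_rebalance
+     * side by side. Selecting each one looks like:
+     *
+     *   // rebalance regardless of shard states
+     *   settingsBuilder().put("cluster.routing.allocation.allow_rebalance", "always");
+     *   // wait until every primary in the cluster is active
+     *   settingsBuilder().put("cluster.routing.allocation.allow_rebalance", "indices_primaries_active");
+     *   // wait until every shard, primary and replica, is active
+     *   settingsBuilder().put("cluster.routing.allocation.allow_rebalance", "indices_all_active");
+     *
+     * The lower-case string forms are an assumption modelled on the "always"
+     * value used elsewhere in this suite; the tests above pass the enum's
+     * toString() instead.
+     */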
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java
new file mode 100644
index 0000000..2f685b6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class ConcurrentRebalanceRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ConcurrentRebalanceRoutingTests.class);
+
+ @Test
+ public void testClusterConcurrentRebalance() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", 3)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("start two nodes and fully start the shards");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")).put(newNode("node4")).put(newNode("node5")).put(newNode("node6")).put(newNode("node7")).put(newNode("node8")).put(newNode("node9")).put(newNode("node10")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("start the replica shards, rebalancing should start, but, only 3 should be rebalancing");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+        // cluster_concurrent_rebalance is 3, so at most 3 shards may relocate at once
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(7));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(3));
+
+ logger.info("finalize this session relocation, 3 more should relocate now");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+        // still at the limit of 3 concurrent rebalance operations
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(7));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(3));
+
+ logger.info("finalize this session relocation, 2 more should relocate now");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+        // only 2 moves remain, which is below the limit of 3
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(8));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(2));
+
+ logger.info("finalize this session relocation, no more relocation");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+        // rebalancing is complete; all shards are started and none are relocating
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(0));
+ }
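+
+    /*
+     * Editor's note: a worked trace of the assertions above. 10 shards
+     * (5 primaries + 5 replicas) start on node1/node2; balancing across 10
+     * nodes leaves one shard per node, so 8 shards must move, throttled to 3
+     * concurrent rebalances:
+     *
+     *   round 1:  7 STARTED, 3 RELOCATING  (first 3 moves depart)
+     *   round 2:  7 STARTED, 3 RELOCATING  (3 landed, the next 3 depart)
+     *   round 3:  8 STARTED, 2 RELOCATING  (only 2 moves remain)
+     *   round 4: 10 STARTED, 0 RELOCATING  (balanced, one shard per node)
+     */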
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java
new file mode 100644
index 0000000..fee7858
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class DeadNodesAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(DeadNodesAllocationTests.class);
+
+ @Test
+ public void simpleDeadNodeOnStartedPrimaryShard() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 2 nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ // starting primaries
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ // starting replicas
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("--> verifying all is allocated");
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+
+ logger.info("--> fail node with primary");
+ String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1";
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode(nodeIdRemaining))
+ ).build();
+
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node(nodeIdRemaining).get(0).primary(), equalTo(true));
+ assertThat(clusterState.routingNodes().node(nodeIdRemaining).get(0).state(), equalTo(STARTED));
+ }
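+
+    /*
+     * Editor's sketch (illustrative, not upstream code): the final assertions
+     * rely on the surviving replica being promoted to primary once the node
+     * holding the old primary disappears from DiscoveryNodes. Spelled out
+     * against the surviving node (the MutableShardRouting type name is an
+     * assumption):
+     *
+     *   MutableShardRouting survivor = clusterState.routingNodes().node(nodeIdRemaining).get(0);
+     *   assert survivor.primary();           // the replica was elected primary
+     *   assert survivor.state() == STARTED;  // and stays active, with no recovery needed
+     */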
+
+ @Test
+ public void deadNodeWhileRelocatingOnToNode() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 2 nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ // starting primaries
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ // starting replicas
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("--> verifying all is allocated");
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+
+ logger.info("--> adding additional node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3"))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ String origPrimaryNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
+
+ logger.info("--> moving primary shard to node3");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
+ );
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
+ assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+
+ logger.info("--> fail primary shard recovering instance on node3 being initialized by killing node3");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode(origPrimaryNodeId))
+ .put(newNode(origReplicaNodeId))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node(origReplicaNodeId).get(0).state(), equalTo(STARTED));
+ }
+
+ @Test
+ public void deadNodeWhileRelocatingOnFromNode() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 2 nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ // starting primaries
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ // starting replicas
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("--> verifying all is allocated");
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+
+ logger.info("--> adding additional node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3"))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ String origPrimaryNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
+
+ logger.info("--> moving primary shard to node3");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
+ );
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
+ assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+
+ logger.info("--> fail primary shard recovering instance on 'origPrimaryNodeId' being relocated");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node3"))
+ .put(newNode(origReplicaNodeId))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node(origReplicaNodeId).get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+ }
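+
+    /*
+     * Editor's note: the two relocation tests above are mirror images. When
+     * the relocation target (node3) dies, the move is cancelled and the source
+     * copy stays STARTED on its original node. When the relocation source
+     * dies, the copy on the surviving replica node stays STARTED (presumably
+     * taking over as primary, though that is not asserted here) while the
+     * node3 copy keeps INITIALIZING.
+     */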
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/DisableAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/DisableAllocationTests.java
new file mode 100644
index 0000000..f58b3cb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/DisableAllocationTests.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class DisableAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(DisableAllocationTests.class);
+
+ @Test
+ public void testClusterDisableAllocation() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ }
+
+ @Test
+ public void testClusterDisableReplicaAllocation() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.disable_replica_allocation", true)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+ }
+
+ @Test
+ public void testIndexDisableAllocation() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("disabled").settings(ImmutableSettings.builder().put(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true).put(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)).numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("enabled").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("disabled"))
+ .addAsNew(metaData.index("enabled"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> verify only enabled index has been routed");
+ assertThat(clusterState.readOnlyRoutingNodes().shardsWithState("enabled", STARTED).size(), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().shardsWithState("disabled", STARTED).size(), equalTo(0));
+ }
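+
+    /*
+     * Editor's note (hedged summary): three related switches are exercised in
+     * this class. The key strings below are assumptions, except
+     * disable_replica_allocation, which appears verbatim in
+     * testClusterDisableReplicaAllocation:
+     *
+     *   cluster.routing.allocation.disable_new_allocation     - block allocation of new shards
+     *   cluster.routing.allocation.disable_allocation         - block allocation of existing shards
+     *   cluster.routing.allocation.disable_replica_allocation - block replica allocation only
+     *
+     * testIndexDisableAllocation applies the per-index variants via the
+     * DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_* constants.
+     */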
+
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java
new file mode 100644
index 0000000..80d1a15
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class ElectReplicaAsPrimaryDuringRelocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ElectReplicaAsPrimaryDuringRelocationTests.class);
+
+ @Test
+ public void testElectReplicaAsPrimaryDuringRelocation() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
+
+ logger.info("Start another node and perform rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("find the replica shard that gets relocated");
+ IndexShardRoutingTable indexShardRoutingTable = null;
+ if (routingTable.index("test").shard(0).replicaShards().get(0).relocating()) {
+ indexShardRoutingTable = routingTable.index("test").shard(0);
+ } else if (routingTable.index("test").shard(1).replicaShards().get(0).relocating()) {
+ indexShardRoutingTable = routingTable.index("test").shard(1);
+ }
+
+ // the relocating shard might be the primary rather than a replica; this test only covers replica relocation, so only assert when a relocating replica was found
+ if (indexShardRoutingTable != null) {
+ logger.info("kill the node [{}] of the primary shard for the relocating replica", indexShardRoutingTable.primaryShard().currentNodeId());
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("make sure all the primary shards are active");
+ assertThat(routingTable.index("test").shard(0).primaryShard().active(), equalTo(true));
+ assertThat(routingTable.index("test").shard(1).primaryShard().active(), equalTo(true));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java
new file mode 100644
index 0000000..e5ac333
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class FailedNodeRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(FailedNodeRoutingTests.class);
+
+ @Test
+ public void simpleFailedNodeTest() {
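+ // allow rebalancing at all times, even while shards are still initializing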
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start 4 nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node4").numberOfShardsWithState(STARTED), equalTo(1));
+
+
+ logger.info("remove 2 nodes where primaries are allocated, reroute");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .remove(routingTable.index("test1").shard(0).primaryShard().currentNodeId())
+ .remove(routingTable.index("test2").shard(0).primaryShard().currentNodeId())
+ )
+ .build();
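+ // rerouting should promote the surviving replica of each index to primary and start initializing a new replica on the remaining nodes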
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (RoutingNode routingNode : routingNodes) {
+ assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNode.numberOfShardsWithState(INITIALIZING), equalTo(1));
+ }
+ }
+
+ @Test
+ public void simpleFailedNodeTestNoReassign() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start 4 nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node4").numberOfShardsWithState(STARTED), equalTo(1));
+
+
+ logger.info("remove 2 nodes where primaries are allocated, reroute");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .remove(routingTable.index("test1").shard(0).primaryShard().currentNodeId())
+ .remove(routingTable.index("test2").shard(0).primaryShard().currentNodeId())
+ )
+ .build();
+ prevRoutingTable = routingTable;
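+ // unlike reroute(), rerouteWithNoReassign applies the node failures without reassigning the failed copies, so they stay unassigned (asserted below)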
+ routingTable = strategy.rerouteWithNoReassign(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (RoutingNode routingNode : routingNodes) {
+ assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(1));
+ }
+ assertThat(routingNodes.unassigned().size(), equalTo(2));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java
new file mode 100644
index 0000000..ca8bf49
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java
@@ -0,0 +1,485 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests routing table changes when started, initializing, and relocating shards fail.
+ */
+public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(FailedShardsRoutingTests.class);
+
+ @Test
+ public void testFailedShardPrimaryRelocatingToAndFrom() {
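+ // high concurrent_recoveries plus allow_rebalance "always" let the move commands below take effect immediately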
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 2 nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ // starting primaries
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ // starting replicas
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("--> verifying all is allocated");
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+
+ logger.info("--> adding additional node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3"))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ String origPrimaryNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
+
+ logger.info("--> moving primary shard to node3");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
+ );
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
+ assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+
+ logger.info("--> fail primary shard recovering instance on node3 being initialized");
+ rerouteResult = allocation.applyFailedShard(clusterState, new ImmutableShardRouting(clusterState.routingNodes().node("node3").get(0)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ logger.info("--> moving primary shard to node3");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
+ );
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
+ assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+
+ logger.info("--> fail primary shard recovering instance on node1 being relocated");
+ rerouteResult = allocation.applyFailedShard(clusterState, new ImmutableShardRouting(clusterState.routingNodes().node(origPrimaryNodeId).get(0)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ // check promotion of replica to primary
+ assertThat(clusterState.routingNodes().node(origReplicaNodeId).get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(origReplicaNodeId));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), anyOf(equalTo(origPrimaryNodeId), equalTo("node3")));
+ }
+
+ @Test
+ public void failPrimaryStartedCheckReplicaElected() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the shards (primaries)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ }
+
+ logger.info("Start the shards (backups)");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ }
+
+ logger.info("fail the primary shard, will have no place to be rerouted to (single node), so stays unassigned");
+ ShardRouting shardToFail = new ImmutableShardRouting(routingTable.index("test").shard(0).primaryShard());
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, shardToFail).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), not(equalTo(shardToFail.currentNodeId())));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+
+ logger.info("fail the shard again, check that nothing happens");
+ assertThat(strategy.applyFailedShard(clusterState, shardToFail).changed(), equalTo(false));
+ }
+
+ @Test
+ public void firstAllocationFailureSingleNode() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding single node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("fail the first shard, will have no place to be rerouted to (single node), so stays unassigned");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, "node1", true, INITIALIZING, 0)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("fail the shard again, see that nothing happens");
+ assertThat(strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, "node1", true, INITIALIZING, 0)).changed(), equalTo(false));
+ }
+
+ @Test
+ public void singleShardMultipleAllocationFailures() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("Building initial routing table");
+ int numberOfReplicas = scaledRandomIntBetween(2, 10);
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(numberOfReplicas))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding {} nodes and performing rerouting", numberOfReplicas + 1);
+ DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder();
+ for (int i = 0; i < numberOfReplicas + 1; i++) {
+ nodeBuilder.put(newNode("node" + Integer.toString(i)));
+ }
+ clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
+ while (!clusterState.routingTable().shardsWithState(UNASSIGNED).isEmpty()) {
+ // start all initializing
+ clusterState = ClusterState.builder(clusterState)
+ .routingTable(strategy
+ .applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)).routingTable()
+ )
+ .build();
+ // and assign more unassigned
+ clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState).routingTable()).build();
+ }
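+ // at this point every copy (one primary plus numberOfReplicas replicas) is started, one per node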
+
+ int shardsToFail = randomIntBetween(1, numberOfReplicas);
+ ArrayList<ShardRouting> failedShards = new ArrayList<ShardRouting>();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ for (int i = 0; i < shardsToFail; i++) {
+ String n = "node" + Integer.toString(randomInt(numberOfReplicas));
+ logger.info("failing shard on node [{}]", n);
+ ShardRouting shardToFail = routingNodes.node(n).get(0);
+ failedShards.add(new MutableShardRouting(shardToFail));
+ }
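+ // note: the same node can be drawn more than once above, so failedShards may contain duplicate entries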
+
+ routingTable = strategy.applyFailedShards(clusterState, failedShards).routingTable();
+
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ for (ShardRouting failedShard : failedShards) {
+ if (!routingNodes.node(failedShard.currentNodeId()).isEmpty()) {
+ fail("shard " + failedShard + " was re-assigned to it's node");
+ }
+ }
+ }
+
+ @Test
+ public void firstAllocationFailureTwoNodes() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ final String nodeHoldingPrimary = routingTable.index("test").shard(0).primaryShard().currentNodeId();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("fail the first shard, will start INITIALIZING on the second node");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, nodeHoldingPrimary, true, INITIALIZING, 0)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), not(equalTo(nodeHoldingPrimary)));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("fail the shard again, see that nothing happens");
+ assertThat(strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, nodeHoldingPrimary, true, INITIALIZING, 0)).changed(), equalTo(false));
+ }
+
+ @Test
+ public void rebalanceFailure() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the shards (primaries)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ }
+
+ logger.info("Start the shards (backups)");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ }
+
+ logger.info("Adding third node and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(3));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(3));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
+
+
+ logger.info("Fail the shards on node 3");
+ ShardRouting shardToFail = routingNodes.node("node3").get(0);
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, new ImmutableShardRouting(shardToFail)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(3));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(3));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
+ // make sure the failedShard is not INITIALIZING again on node3
+ assertThat(routingNodes.node("node3").get(0).shardId(), not(equalTo(shardToFail.shardId())));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java
new file mode 100644
index 0000000..b9e11a8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.List;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests cluster-level and index-level shard allocation filtering.
+ */
+public class FilterRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(FilterRoutingTests.class);
+
+ @Test
+ public void testClusterFilters() {
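+ // shards may only go to nodes whose tag1 matches an include value and none of the exclude values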
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.include.tag1", "value1,value2")
+ .put("cluster.routing.allocation.exclude.tag1", "value3,value4")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding four nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("tag1", "value1")))
+ .put(newNode("node2", ImmutableMap.of("tag1", "value2")))
+ .put(newNode("node3", ImmutableMap.of("tag1", "value3")))
+ .put(newNode("node4", ImmutableMap.of("tag1", "value4")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> make sure shards are only allocated on tag1 with value1 and value2");
+ List<MutableShardRouting> startedShards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED);
+ assertThat(startedShards.size(), equalTo(4));
+ for (MutableShardRouting startedShard : startedShards) {
+ assertThat(startedShard.currentNodeId(), Matchers.anyOf(equalTo("node1"), equalTo("node2")));
+ }
+ }
+
+ @Test
+ public void testIndexFilters() {
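+ // same filtering as the cluster-level test, but configured on the index itself via index.routing.allocation.*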
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .put("index.routing.allocation.include.tag1", "value1,value2")
+ .put("index.routing.allocation.exclude.tag1", "value3,value4")
+ .build()))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("tag1", "value1")))
+ .put(newNode("node2", ImmutableMap.of("tag1", "value2")))
+ .put(newNode("node3", ImmutableMap.of("tag1", "value3")))
+ .put(newNode("node4", ImmutableMap.of("tag1", "value4")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> make sure shards are only allocated on tag1 with value1 and value2");
+ List<MutableShardRouting> startedShards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED);
+ assertThat(startedShards.size(), equalTo(4));
+ for (MutableShardRouting startedShard : startedShards) {
+ assertThat(startedShard.currentNodeId(), Matchers.anyOf(equalTo("node1"), equalTo("node2")));
+ }
+
+ logger.info("--> switch between value2 and value4, shards should be relocating");
+
+ metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .put("index.routing.allocation.include.tag1", "value1,value4")
+ .put("index.routing.allocation.exclude.tag1", "value2,value3")
+ .build()))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(2));
+
+ logger.info("--> finish relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ startedShards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED);
+ assertThat(startedShards.size(), equalTo(4));
+ for (MutableShardRouting startedShard : startedShards) {
+ assertThat(startedShard.currentNodeId(), Matchers.anyOf(equalTo("node1"), equalTo("node4")));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java
new file mode 100644
index 0000000..1536a12
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java
@@ -0,0 +1,538 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests that the shards of multiple indices end up evenly balanced across the nodes.
+ */
+public class IndexBalanceTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(IndexBalanceTests.class);
+
+ @Test
+ public void testBalanceAllNodesStarted() {
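+ // rebalancing is unthrottled: allowed at all times and with no limit on concurrent rebalance operations (-1)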
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test1").numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test1").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding three node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well, we make sure that they
+ // recover from primary *started* shards in the
+ // IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the more shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void testBalanceIncrementallyStartNodes() {
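+ // Settings: up to 10 concurrent recoveries per node, rebalancing always allowed,
+ // and no cluster-wide cap on concurrent rebalances (-1 disables the limit).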
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test1").numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test1").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the primary shard");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ // the replica (backup) shards are initializing as well; we make sure that they
+ // recover from *started* primary shards in the
+ // IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void testBalanceAllNodesStartedAddIndex() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding three node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ // the replica (backup) shards are initializing as well; we make sure that they
+ // recover from *started* primary shards in the
+ // IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the more shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ }
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ logger.info("Add new index 3 shards 1 replica");
+
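+ // Adding an index to a live cluster: extend the MetaData, add the new index to
+ // the existing RoutingTable via addAsNew, and publish both in a rebuilt ClusterState.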
+ prevRoutingTable = routingTable;
+ metaData = MetaData.builder(metaData)
+ .put(IndexMetaData.builder("test1").settings(ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ ))
+ .build();
+ routingTable = RoutingTable.builder(routingTable)
+ .addAsNew(metaData.index("test1"))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ // the replica (backup) shards are initializing as well; we make sure that they
+ // recover from *started* primary shards in the
+ // IndicesClusterStateService
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the more shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ }
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
new file mode 100644
index 0000000..a8239eb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
@@ -0,0 +1,340 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class NodeVersionAllocationDeciderTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(NodeVersionAllocationDeciderTests.class);
+
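+ // Shards must only recover onto nodes whose version is on or after that of the
+ // node they recover from; a replica therefore stays unassigned while the only
+ // free node runs an older version than the primary's node.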
+ @Test
+ public void testDoNotAllocateFromPrimary() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(2))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
+ }
+
+ logger.info("start two nodes and fully start the shards");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(2));
+ }
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
+ }
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
+ }
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", getPreviousVersion())))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
+ }
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
+ }
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(2));
+ }
+ }
+
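+ // Randomized churn: repeatedly grow or shrink a mixed-version node set and let
+ // stabilize() verify the recovery version constraint after every change.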
+ @Test
+ public void testRandom() {
+ AllocationService service = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ logger.info("Building initial routing table");
+ MetaData.Builder builder = MetaData.builder();
+ RoutingTable.Builder rtBuilder = RoutingTable.builder();
+ int numIndices = between(1, 20);
+ for (int i = 0; i < numIndices; i++) {
+ builder.put(IndexMetaData.builder("test_" + i).numberOfShards(between(1, 5)).numberOfReplicas(between(0, 2)));
+ }
+ MetaData metaData = builder.build();
+
+ for (int i = 0; i < numIndices; i++) {
+ rtBuilder.addAsNew(metaData.index("test_" + i));
+ }
+ RoutingTable routingTable = rtBuilder.build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(routingTable.allShards().size()));
+ List<DiscoveryNode> nodes = new ArrayList<DiscoveryNode>();
+ int nodeIdx = 0;
+ int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+ DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
+ int numNodes = between(1, 20);
+ if (nodes.size() > numNodes) {
+ Collections.shuffle(nodes, getRandom());
+ nodes = nodes.subList(0, numNodes);
+ } else {
+ for (int j = nodes.size(); j < numNodes; j++) {
+ if (frequently()) {
+ nodes.add(newNode("node" + (nodeIdx++), randomBoolean() ? getPreviousVersion() : Version.CURRENT));
+ } else {
+ nodes.add(newNode("node" + (nodeIdx++), randomVersion()));
+ }
+ }
+ }
+ for (DiscoveryNode node : nodes) {
+ nodesBuilder.put(node);
+ }
+ clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
+ clusterState = stabilize(clusterState, service);
+ }
+ }
+
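+ // Simulates a rolling restart: three previous-version nodes are swapped out one
+ // at a time for current-version nodes, stabilizing the cluster after each step.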
+ @Test
+ public void testRollingRestart() {
+ AllocationService service = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(2))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
+ }
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("old0", getPreviousVersion()))
+ .put(newNode("old1", getPreviousVersion()))
+ .put(newNode("old2", getPreviousVersion()))).build();
+ clusterState = stabilize(clusterState, service);
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("old0", getPreviousVersion()))
+ .put(newNode("old1", getPreviousVersion()))
+ .put(newNode("new0"))).build();
+
+ clusterState = stabilize(clusterState, service);
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node0", getPreviousVersion()))
+ .put(newNode("new1"))
+ .put(newNode("new0"))).build();
+
+ clusterState = stabilize(clusterState, service);
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("new2"))
+ .put(newNode("new1"))
+ .put(newNode("new0"))).build();
+
+ clusterState = stabilize(clusterState, service);
+ routingTable = clusterState.routingTable();
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), notNullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), notNullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), notNullValue());
+ }
+ }
+
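+ // Reroutes, then repeatedly starts all INITIALIZING shards until the routing
+ // table reaches a fixed point, asserting the recovery version invariant each round.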
+ private ClusterState stabilize(ClusterState clusterState, AllocationService service) {
+ logger.trace("RoutingNodes: {}", clusterState.routingNodes().prettyPrint());
+
+ RoutingTable routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ assertRecoveryNodeVersions(routingNodes);
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ boolean stable = false;
+ for (int i = 0; i < 1000; i++) { // at most 1000 iters - this should be enough for all tests
+ logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (stable = (routingTable == prev)) {
+ break;
+ }
+ assertRecoveryNodeVersions(routingNodes);
+ prev = routingTable;
+ }
+ logger.info("stabilized success [{}]", stable);
+ assertThat(stable, is(true));
+ return clusterState;
+ }
+
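+ // Invariant: for every relocation and for every initializing replica, the target
+ // node's version must be on or after the source node's version.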
+ private void assertRecoveryNodeVersions(RoutingNodes routingNodes) {
+ logger.trace("RoutingNodes: {}", routingNodes.prettyPrint());
+
+ List<MutableShardRouting> mutableShardRoutings = routingNodes.shardsWithState(ShardRoutingState.RELOCATING);
+ for (MutableShardRouting r : mutableShardRoutings) {
+ String toId = r.relocatingNodeId();
+ String fromId = r.currentNodeId();
+ assertThat(fromId, notNullValue());
+ assertThat(toId, notNullValue());
+ logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
+ assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
+ }
+
+ mutableShardRoutings = routingNodes.shardsWithState(ShardRoutingState.INITIALIZING);
+ for (MutableShardRouting r : mutableShardRoutings) {
+ if (r.initializing() && r.relocatingNodeId() == null && !r.primary()) {
+ MutableShardRouting primary = routingNodes.activePrimary(r);
+ assertThat(primary, notNullValue());
+ String fromId = primary.currentNodeId();
+ String toId = r.currentNodeId();
+ logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
+ assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java
new file mode 100644
index 0000000..2630527
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class PreferLocalPrimariesToRelocatingPrimariesTests extends ElasticsearchAllocationTestCase {
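+ // When a node rejoins, unassigned local primaries should win recovery slots over
+ // primaries relocating in from other nodes.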
+ @Test
+ public void testPreferLocalPrimaryAllocationOverFiltered() {
+ int concurrentRecoveries = randomIntBetween(1, 10);
+ int primaryRecoveries = randomIntBetween(1, 10);
+ int numberOfShards = randomIntBetween(5, 20);
+ int totalNumberOfShards = numberOfShards * 2;
+
+ logger.info("create an allocation with [{}] initial primary recoveries and [{}] concurrent recoveries", primaryRecoveries, concurrentRecoveries);
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", concurrentRecoveries)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", primaryRecoveries)
+ .build());
+
+ logger.info("create 2 indices with [{}] no replicas, and wait till all are allocated", numberOfShards);
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(numberOfShards).numberOfReplicas(0))
+ .put(IndexMetaData.builder("test2").numberOfShards(numberOfShards).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("adding two nodes and performing rerouting till all are allocated");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("tag1", "value1")))
+ .put(newNode("node2", ImmutableMap.of("tag1", "value2")))).build();
+
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ while (!clusterState.routingNodes().shardsWithState(INITIALIZING).isEmpty()) {
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+
+ logger.info("remove one of the nodes and apply filter to move everything from another node");
+
+ metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settingsBuilder()
+ .put("index.number_of_shards", numberOfShards)
+ .put("index.number_of_replicas", 0)
+ .put("index.routing.allocation.exclude.tag1", "value2")
+ .build()))
+ .put(IndexMetaData.builder("test2").settings(settingsBuilder()
+ .put("index.number_of_shards", numberOfShards)
+ .put("index.number_of_replicas", 0)
+ .put("index.routing.allocation.exclude.tag1", "value2")
+ .build()))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("[{}] primaries should be still started but [{}] other primaries should be unassigned", numberOfShards, numberOfShards);
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(numberOfShards));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(numberOfShards));
+
+ logger.info("start node back up");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node1", ImmutableMap.of("tag1", "value1")))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
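+ // Each round at most [primaryRecoveries] unassigned primaries may initialize
+ // locally; the assertion below checks that relocations do not crowd them out.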
+ while (clusterState.routingNodes().shardsWithState(STARTED).size() < totalNumberOfShards) {
+ int localInitializations = 0;
+ int relocatingInitializations = 0;
+ for (MutableShardRouting routing : clusterState.routingNodes().shardsWithState(INITIALIZING)) {
+ if (routing.relocatingNodeId() == null) {
+ localInitializations++;
+ } else {
+ relocatingInitializations++;
+ }
+ }
+ int needToInitialize = totalNumberOfShards - clusterState.routingNodes().shardsWithState(STARTED).size() - clusterState.routingNodes().shardsWithState(RELOCATING).size();
+ logger.info("local initializations: [{}], relocating: [{}], need to initialize: {}", localInitializations, relocatingInitializations, needToInitialize);
+ assertThat(localInitializations, equalTo(Math.min(primaryRecoveries, needToInitialize)));
+ clusterState = startRandomInitializingShard(clusterState, strategy);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java
new file mode 100644
index 0000000..f1cd54a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class PreferPrimaryAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(PreferPrimaryAllocationTests.class);
+
+ @Test
+ public void testPreferPrimaryAllocationOverReplicas() {
+ logger.info("create an allocation with 1 initial recoveries");
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 1)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 1)
+ .build());
+
+ logger.info("create several indices with no replicas, and wait till all are allocated");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(10).numberOfReplicas(0))
+ .put(IndexMetaData.builder("test2").numberOfShards(10).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("adding two nodes and performing rerouting till all are allocated");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ while (!clusterState.routingNodes().shardsWithState(INITIALIZING).isEmpty()) {
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+
+ logger.info("increasing the number of replicas to 1, and perform a reroute (to get the replicas allocation going)");
+ routingTable = RoutingTable.builder(routingTable).updateNumberOfReplicas(1).build();
+ metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(1).build();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaData).build();
+
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("2 replicas should be initializing now for the existing indices (we throttle to 1)");
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ logger.info("create a new index");
+ metaData = MetaData.builder(clusterState.metaData())
+ .put(IndexMetaData.builder("new_index").numberOfShards(4).numberOfReplicas(0))
+ .build();
+
+ routingTable = RoutingTable.builder(clusterState.routingTable())
+ .addAsNew(metaData.index("new_index"))
+ .build();
+
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("reroute, verify that primaries for the new index primary shards are allocated");
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingTable().index("new_index").shardsWithState(INITIALIZING).size(), equalTo(2));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java
new file mode 100644
index 0000000..30ad415
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class PrimaryElectionRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(PrimaryElectionRoutingTests.class);
+
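+ // When the node holding a started primary is killed, the started replica should
+ // be elected primary and a fresh replica allocated on the newly added node.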
+ @Test
+ public void testBackupElectionToPrimaryWhenPrimaryCanBeAllocatedToAnotherNode() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the primary shard (on node1)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the backup shard (on node2)");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Adding third node and reroute and kill first node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3")).remove("node1")).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingNodes.node("node1"), nullValue());
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
+ // verify where the primary is
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node3"));
+ }
+
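+ // An initializing replica cannot be promoted when the node holding its primary
+ // fails; it must be removed and the primary re-allocated as a fresh initializing shard.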
+ @Test
+ public void testRemovingInitializingReplicasIfPrimariesFails() {
+ AllocationService allocation = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("Start the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ rerouteResult = allocation.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(routingNodes.shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ // now fail one node while the replicas are still initializing; the failed node also holds a primary
+ logger.info("--> fail node with primary");
+ String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1";
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode(nodeIdRemaining))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingNodes.shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(routingNodes.node(nodeIdRemaining).shardsWithState(INITIALIZING).get(0).primary(), equalTo(true));
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java
new file mode 100644
index 0000000..ee47827
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(PrimaryNotRelocatedWhileBeingRecoveredTests.class);
+
+
+ @Test
+ public void testPrimaryNotRelocatedWhileBeingRecoveredFrom() {
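+ // while a replica is still recovering from its primary, the primary itself must not be relocated away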
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the primary shard (on node1)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+
+ logger.info("start another node, replica will start recovering form primary");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(5));
+
+ logger.info("start another node, make sure the primary is not relocated");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(5));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java
new file mode 100644
index 0000000..5cbd22d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.MetaData.Builder;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Random;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class RandomAllocationDeciderTests extends ElasticsearchAllocationTestCase {
+
+ /* This test makes random allocation decisions on a growing and shrinking
+ * cluster, leading to a random distribution of the shards. After a certain
+ * number of iterations the test allows every allocation (unless the same
+ * shard is already allocated on a node) and balances the cluster to reach
+ * an optimal balance. */
+ @Test
+ public void testRandomDecisions() {
+ RandomAllocationDecider randomAllocationDecider = new RandomAllocationDecider(getRandom());
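+ // chain the random decider with SameShardAllocationDecider, so the only hard constraint is that two copies of the same shard never share a node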
+ AllocationService strategy = new AllocationService(settingsBuilder().build(), new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ randomAllocationDecider))), new ShardsAllocators(), ClusterInfoService.EMPTY);
+ int indices = between(1, 20);
+ Builder metaBuilder = MetaData.builder();
+ int maxNumReplicas = 1;
+ int totalNumShards = 0;
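+ // build a random set of indices, tracking the total number of shard copies and the largest copy count per shard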
+ for (int i = 0; i < indices; i++) {
+ int replicas = between(0, 6);
+ maxNumReplicas = Math.max(maxNumReplicas, replicas + 1);
+ int numShards = between(1, 20);
+ totalNumShards += numShards * (replicas + 1);
+ metaBuilder.put(IndexMetaData.builder("INDEX_" + i).numberOfShards(numShards).numberOfReplicas(replicas));
+
+ }
+ MetaData metaData = metaBuilder.build();
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+ for (int i = 0; i < indices; i++) {
+ routingTableBuilder.addAsNew(metaData.index("INDEX_" + i));
+ }
+
+ RoutingTable routingTable = routingTableBuilder.build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ int numIters = atLeast(20);
+ int nodeIdCounter = 0;
+ int atMostNodes = between(Math.max(1, maxNumReplicas), numIters);
+ final boolean frequentNodes = randomBoolean();
+ for (int i = 0; i < numIters; i++) {
+ ClusterState.Builder stateBuilder = ClusterState.builder(clusterState);
+ DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
+
+ if (clusterState.nodes().size() <= atMostNodes &&
+ (nodeIdCounter == 0 || (frequentNodes ? frequently() : rarely()))) {
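+ // grow the cluster; when 'frequentNodes' is set, new nodes join on most iterations, otherwise only rarely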
+ int numNodes = atLeast(1);
+ for (int j = 0; j < numNodes; j++) {
+ logger.info("adding node [{}]", nodeIdCounter);
+ newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++)));
+ }
+ }
+
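+ // occasionally shrink the cluster by removing a random node (never the most recently added one)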
+ if (nodeIdCounter > 1 && rarely()) {
+ int nodeId = between(0, nodeIdCounter - 2);
+ logger.info("removing node [{}]", nodeId);
+ newNodesBuilder.remove("NODE_" + nodeId);
+ }
+
+ stateBuilder.nodes(newNodesBuilder.build());
+ clusterState = stateBuilder.build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ if (clusterState.routingNodes().shardsWithState(INITIALIZING).size() > 0) {
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING))
+ .routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+ }
+ logger.info("Fill up nodes such that every shard can be allocated");
+ if (clusterState.nodes().size() < maxNumReplicas) {
+ ClusterState.Builder stateBuilder = ClusterState.builder(clusterState);
+ DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
+ for (int j = 0; j < (maxNumReplicas - clusterState.nodes().size()); j++) {
+ logger.info("adding node [{}]", nodeIdCounter);
+ newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++)));
+ }
+ stateBuilder.nodes(newNodesBuilder.build());
+ clusterState = stateBuilder.build();
+ }
+
+
+ randomAllocationDecider.alwaysSayYes = true;
+ logger.info("now say YES to everything");
+ int iterations = 0;
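+ // with every decision forced to YES, reroute repeatedly until no shard is left initializing or unassigned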
+ do {
+ iterations++;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ if (clusterState.routingNodes().shardsWithState(INITIALIZING).size() > 0) {
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING))
+ .routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+
+ } while ((clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size() != 0 ||
+ clusterState.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size() != 0) && iterations < 200);
+ logger.info("Done Balancing after [{}] iterations", iterations);
+ // we stop after 200 iterations; if it didn't stabilize by then, something is likely to be wrong
+ assertThat("max num iteration exceeded", iterations, Matchers.lessThan(200));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0));
+ int shards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size();
+ assertThat(shards, equalTo(totalNumShards));
+ final int numNodes = clusterState.nodes().size();
+ final int upperBound = (int) Math.round((shards / (double) numNodes) * 1.10);
+ final int lowerBound = (int) Math.round((shards / (double) numNodes) * 0.90);
+ for (int i = 0; i < nodeIdCounter; i++) {
+ if (clusterState.getRoutingNodes().node("NODE_" + i) == null) {
+ continue;
+ }
+ assertThat(clusterState.getRoutingNodes().node("NODE_" + i).size(), Matchers.anyOf(
+ Matchers.anyOf(equalTo((shards / numNodes) + 1), equalTo((shards / numNodes) - 1), equalTo((shards / numNodes))),
+ Matchers.allOf(Matchers.greaterThanOrEqualTo(lowerBound), Matchers.lessThanOrEqualTo(upperBound))));
+ }
+ }
+
+ private static final class RandomAllocationDecider extends AllocationDecider {
+
+ private final Random random;
+
+ public RandomAllocationDecider(Random random) {
+ super(ImmutableSettings.EMPTY);
+ this.random = random;
+ }
+
+ public boolean alwaysSayYes = false;
+
+ @Override
+ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
+ return getRandomDecision();
+ }
+
+ private Decision getRandomDecision() {
+ if (alwaysSayYes) {
+ return Decision.YES;
+ }
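+ // biased random decision: roughly 50% NO, 10% THROTTLE, 30% YES, 10% ALWAYS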
+ switch (random.nextInt(10)) {
+ case 9:
+ case 8:
+ case 7:
+ case 6:
+ case 5:
+ return Decision.NO;
+ case 4:
+ return Decision.THROTTLE;
+ case 3:
+ case 2:
+ case 1:
+ return Decision.YES;
+ default:
+ return Decision.ALWAYS;
+ }
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return getRandomDecision();
+ }
+
+ @Override
+ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return getRandomDecision();
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java
new file mode 100644
index 0000000..c3f9173
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class RebalanceAfterActiveTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(RebalanceAfterActiveTests.class);
+
+ @Test
+ public void testRebalanceOnlyAfterAllShardsAreActive() {
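+ // rebalancing must not start while any shard copy is still initializing; only once all shards are active may relocations begin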
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("start two nodes and fully start the shards");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")).put(newNode("node4")).put(newNode("node5")).put(newNode("node6"))
+ .put(newNode("node7")).put(newNode("node8")).put(newNode("node9")).put(newNode("node10")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("start the replica shards, rebalancing should start");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // rebalancing kicks in: half of the shards relocate to the new nodes while the other half stay started
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(5));
+
+ logger.info("complete relocation, other half of relocation should happen");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // we now only relocate 3, since 2 remain where they are!
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(7));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(3));
+
+ logger.info("complete relocation, thats it!");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
+ // make sure relocation ended up even, one shard per node
+ for (RoutingNode routingNode : routingNodes) {
+ assertThat(routingNode.size(), equalTo(1));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java
new file mode 100644
index 0000000..ad11807
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class ReplicaAllocatedAfterPrimaryTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ReplicaAllocatedAfterPrimaryTests.class);
+
+ @Test
+ public void testBackupIsAllocatedAfterPrimary() {
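+ // a replica (backup) shard must stay unassigned until its primary has started; only then may it initialize on another node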
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ final String nodeHoldingPrimary = routingTable.index("test").shard(0).primaryShard().currentNodeId();
+
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Start all the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node(nodeHoldingPrimary).shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ final String nodeHoldingReplica = routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId();
+ assertThat(nodeHoldingPrimary, not(equalTo(nodeHoldingReplica)));
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java
new file mode 100644
index 0000000..c545428
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java
@@ -0,0 +1,415 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class RoutingNodesIntegrityTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(RoutingNodesIntegrityTests.class);
+
+ @Test
+ public void testBalanceAllNodesStarted() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test1").numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("Adding three node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ // all shards are unassigned. so no inactive shards or primaries.
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(true));
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(true));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+
+ logger.info("Start the more shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ }
+
+ @Test
+ public void testBalanceIncrementallyStartNodes() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test1").numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the primary shard");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void testBalanceAllNodesStartedAddIndex() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 1)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding three node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
+
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(true));
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(true));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ routingNodes = clusterState.routingNodes();
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(INITIALIZING), equalTo(1));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(1));
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the more shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ logger.info("Add new index 3 shards 1 replica");
+
+ prevRoutingTable = routingTable;
+ metaData = MetaData.builder(metaData)
+ .put(IndexMetaData.builder("test1").settings(ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ ))
+ .build();
+ routingTable = RoutingTable.builder(routingTable)
+ .addAsNew(metaData.index("test1"))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(true));
+
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Reroute, assign");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(true));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Reroute, start the primaries");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Reroute, start the replicas");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+
+ logger.info("kill one node");
+ IndexShardRoutingTable indexShardRoutingTable = routingTable.index("test").shard(0);
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ // replica got promoted to primary
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Start Recovering shards round 1");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Start Recovering shards round 2");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ }
+
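+ // delegates to the consistency assertions in RoutingNodes, which verify the internal shard bookkeeping per node and state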
+ private boolean assertShardStats(RoutingNodes routingNodes) {
+ return RoutingNodes.assertShardStats(routingNodes);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java
new file mode 100644
index 0000000..74106e9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+
+/**
+ */
+public class RoutingNodesUtils {
+
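+ // counts the shards in the given state across all routing nodes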
+ public static int numberOfShardsOfType(RoutingNodes nodes, ShardRoutingState state) {
+ int count = 0;
+ for (RoutingNode routingNode : nodes) {
+ count += routingNode.numberOfShardsWithState(state);
+ }
+ return count;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java
new file mode 100644
index 0000000..c35959c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.allocation.RoutingNodesUtils.numberOfShardsOfType;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class SameShardRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(SameShardRoutingTests.class);
+
+ @Test
+ @TestLogging("cluster.routing.allocation:TRACE")
+ public void sameHost() {
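+ // with same-host checking enabled, a replica may not be allocated to a node that reports the same host as its primary, even if the node id differs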
+ AllocationService strategy = createAllocationService(settingsBuilder().put(SameShardAllocationDecider.SAME_HOST_SETTING, true).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes with the same host");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(new DiscoveryNode("node1", "node1", "test1", "test1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT))
+ .put(new DiscoveryNode("node2", "node2", "test1", "test1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.INITIALIZING), equalTo(2));
+
+ logger.info("--> start all primary shards, no replica will be started since its on the same host");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.readOnlyRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.STARTED), equalTo(2));
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.INITIALIZING), equalTo(0));
+
+ logger.info("--> add another node, with a different host, replicas will be allocating");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(new DiscoveryNode("node3", "node3", "test2", "test2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.STARTED), equalTo(2));
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.INITIALIZING), equalTo(2));
+ for (MutableShardRouting shardRouting : clusterState.readOnlyRoutingNodes().shardsWithState(INITIALIZING)) {
+ assertThat(shardRouting.currentNodeId(), equalTo("node3"));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java
new file mode 100644
index 0000000..e21246b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ShardVersioningTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ShardVersioningTests.class);
+
+ @Test
+ public void simple() {
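+        // shard routings carry an allocation version: freshly assigned shards start at 1, and
+        // the asserts below check that starting a primary bumps both copies of that shard to 2
+        // while the untouched index "test2" stays at version 1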
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().version(), equalTo(1l));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().version(), equalTo(1l));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().version(), equalTo(2l));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).version(), equalTo(2l));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().version(), equalTo(1l));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).version(), equalTo(1l));
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java
new file mode 100644
index 0000000..1086670
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.cluster.routing.allocation.RoutingNodesUtils.numberOfShardsOfType;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class ShardsLimitAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ShardsLimitAllocationTests.class);
+
+ @Test
+ public void indexLevelShardsLimitAllocate() {
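+        // INDEX_TOTAL_SHARDS_PER_NODE caps the shards of this index per node: with 4 primaries
+        // plus 4 replicas, a limit of 2 and two nodes, only 4 of the 8 copies can be assigned
+        // and the other 4 stay unassigned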
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, 2)))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(2));
+
+ logger.info("Start the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(0));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(0));
+ assertThat(clusterState.readOnlyRoutingNodes().unassigned().size(), equalTo(4));
+
+ logger.info("Do another reroute, make sure its still not allocated");
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+
+ @Test
+ public void indexLevelShardsLimitRemain() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.balance.index", 0.0f)
+ .put("cluster.routing.allocation.balance.replica", 1.0f)
+ .put("cluster.routing.allocation.balance.primary", 0.0f)
+ .build());
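+        // the index and primary balance weights are zeroed out, so the balancer weighs only the
+        // per-node shard count; this keeps all "test" shards on node1 and all "test1" shards on
+        // node2 until the per-node limit below forces a move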
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ ))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ logger.info("Adding one node and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), STARTED), equalTo(5));
+
+ logger.info("add another index with 5 shards");
+ metaData = MetaData.builder(metaData)
+ .put(IndexMetaData.builder("test1").settings(ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ ))
+ .build();
+ routingTable = RoutingTable.builder(routingTable)
+ .addAsNew(metaData.index("test1"))
+ .build();
+
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Add another one node and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), STARTED), equalTo(10));
+
+ for (MutableShardRouting shardRouting : clusterState.readOnlyRoutingNodes().node("node1")) {
+ assertThat(shardRouting.index(), equalTo("test"));
+ }
+ for (MutableShardRouting shardRouting : clusterState.readOnlyRoutingNodes().node("node2")) {
+ assertThat(shardRouting.index(), equalTo("test1"));
+ }
+
+ logger.info("update " + ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE + " for test, see that things move");
+ metaData = MetaData.builder(metaData)
+ .put(IndexMetaData.builder("test").settings(ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, 3)
+ ))
+ .build();
+
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).build();
+
+ logger.info("reroute after setting");
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(3));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(RELOCATING), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(RELOCATING), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(3));
+        // the first move breaks the balance, and right after the shards move to node2 the balancer
+        // moves 2 shards from node2 back to node1, since INITIALIZING shards are considered during rebalancing
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        // now we are done, unlike with the EvenShardCountAllocator, since the Balancer is not solely based on the average
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(5));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(5));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java
new file mode 100644
index 0000000..a72fd5b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java
@@ -0,0 +1,412 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Set;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Sets.newHashSet;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.cluster.routing.allocation.RoutingNodesUtils.numberOfShardsOfType;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SingleShardNoReplicasRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(SingleShardNoReplicasRoutingTests.class);
+
+ @Test
+ public void testSingleIndexStartedShard() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+
+ logger.info("Rerouting again, nothing should change");
+ prevRoutingTable = routingTable;
+ clusterState = ClusterState.builder(clusterState).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable == prevRoutingTable, equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Marking the shard as started");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable != prevRoutingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+
+ logger.info("Starting another node and making sure nothing changed");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable == prevRoutingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+
+ logger.info("Killing node1 where the shard is, checking the shard is relocated");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable != prevRoutingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2"));
+
+ logger.info("Start another node, make sure that things remain the same (shard is in node2 and initializing)");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(routingTable == prevRoutingTable, equalTo(true));
+
+ logger.info("Start the shard on node 2");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable != prevRoutingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2"));
+ }
+
+ @Test
+ public void testSingleIndexShardFailed() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Adding one node and rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).unassigned(), equalTo(false));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+
+ logger.info("Marking the shard as failed");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING).get(0)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ }
+
+ @Test
+ public void testMultiIndexEvenDistribution() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ final int numberOfIndices = 50;
+ logger.info("Building initial routing table with " + numberOfIndices + " indices");
+
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ for (int i = 0; i < numberOfIndices; i++) {
+ metaDataBuilder.put(IndexMetaData.builder("test" + i).numberOfShards(1).numberOfReplicas(0));
+ }
+ MetaData metaData = metaDataBuilder.build();
+
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+ for (int i = 0; i < numberOfIndices; i++) {
+ routingTableBuilder.addAsNew(metaData.index("test" + i));
+ }
+ RoutingTable routingTable = routingTableBuilder.build();
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.indicesRouting().size(), equalTo(numberOfIndices));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding " + (numberOfIndices / 2) + " nodes");
+ DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
+ List<DiscoveryNode> nodes = newArrayList();
+ for (int i = 0; i < (numberOfIndices / 2); i++) {
+ nodesBuilder.put(newNode("node" + i));
+ }
+ RoutingTable prevRoutingTable = routingTable;
+ clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).primary(), equalTo(true));
+            // make sure every shard was allocated to one of the first 25 nodes (2 shards per node)
+ String nodeId = routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId();
+ int nodeIndex = Integer.parseInt(nodeId.substring("node".length()));
+ assertThat(nodeIndex, lessThan(25));
+ }
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ Set<String> encounteredIndices = newHashSet();
+ for (RoutingNode routingNode : routingNodes) {
+ assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(0));
+ assertThat(routingNode.size(), equalTo(2));
+            // each of the first 25 nodes (the only nodes so far) holds exactly 2 initializing shards
+ int nodeIndex = Integer.parseInt(routingNode.nodeId().substring("node".length()));
+ assertThat(nodeIndex, lessThan(25));
+            // check that no node holds two shards of the same index (each index has a single shard)
+ for (MutableShardRouting shardRoutingEntry : routingNode) {
+ assertThat(encounteredIndices, not(hasItem(shardRoutingEntry.index())));
+ encounteredIndices.add(shardRoutingEntry.index());
+ }
+ }
+
+ logger.info("Adding additional " + (numberOfIndices / 2) + " nodes, nothing should change");
+ nodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
+ for (int i = (numberOfIndices / 2); i < numberOfIndices; i++) {
+ nodesBuilder.put(newNode("node" + i));
+ }
+ prevRoutingTable = routingTable;
+ clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(false));
+
+ logger.info("Marking the shard as started");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ int numberOfRelocatingShards = 0;
+ int numberOfStartedShards = 0;
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(STARTED), equalTo(RELOCATING)));
+ if (routingTable.index("test" + i).shard(0).shards().get(0).state() == STARTED) {
+ numberOfStartedShards++;
+ } else if (routingTable.index("test" + i).shard(0).shards().get(0).state() == RELOCATING) {
+ numberOfRelocatingShards++;
+ }
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).primary(), equalTo(true));
+            // make sure each shard, whether relocating or started, is still on one of the first 25 nodes
+ String nodeId = routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId();
+ int nodeIndex = Integer.parseInt(nodeId.substring("node".length()));
+ assertThat(nodeIndex, lessThan(25));
+ }
+ assertThat(numberOfRelocatingShards, equalTo(25));
+ assertThat(numberOfStartedShards, equalTo(25));
+ }
+
+ @Test
+ public void testMultiIndexUnevenNodes() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ final int numberOfIndices = 10;
+ logger.info("Building initial routing table with " + numberOfIndices + " indices");
+
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ for (int i = 0; i < numberOfIndices; i++) {
+ metaDataBuilder.put(IndexMetaData.builder("test" + i).numberOfShards(1).numberOfReplicas(0));
+ }
+ MetaData metaData = metaDataBuilder.build();
+
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+ for (int i = 0; i < numberOfIndices; i++) {
+ routingTableBuilder.addAsNew(metaData.index("test" + i));
+ }
+ RoutingTable routingTable = routingTableBuilder.build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.indicesRouting().size(), equalTo(numberOfIndices));
+
+ logger.info("Starting 3 nodes and rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")))
+ .build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ }
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ assertThat(numberOfShardsOfType(routingNodes, INITIALIZING), equalTo(numberOfIndices));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(INITIALIZING), anyOf(equalTo(3), equalTo(4)));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), anyOf(equalTo(3), equalTo(4)));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), anyOf(equalTo(3), equalTo(4)));
+
+ logger.info("Start two more nodes, things should remain the same");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node4")).put(newNode("node5")))
+ .build();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED)));
+ }
+ routingNodes = clusterState.routingNodes();
+ assertThat("4 source shard routing are relocating", numberOfShardsOfType(routingNodes, RELOCATING), equalTo(4));
+ assertThat("4 target shard routing are initializing", numberOfShardsOfType(routingNodes, INITIALIZING), equalTo(4));
+
+ logger.info("Now, mark the relocated as started");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+// routingTable = strategy.reroute(new RoutingStrategyInfo(metaData, routingTable), nodes);
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED)));
+ }
+ routingNodes = clusterState.routingNodes();
+ assertThat(numberOfShardsOfType(routingNodes, STARTED), equalTo(numberOfIndices));
+ for (RoutingNode routingNode : routingNodes) {
+ assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(2));
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java
new file mode 100644
index 0000000..82dab0e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class SingleShardOneReplicaRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(SingleShardOneReplicaRoutingTests.class);
+
+ @Test
+ public void testSingleIndexFirstStartPrimaryThenBackups() {
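+        // "backup" is this test's term for the replica: it may not be allocated until its
+        // primary is started, and never on the same node as the primary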
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary shards not started");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the primary shard (on node1)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+        // the backup shard is initializing as well; we make sure that it recovers from the *started* primary shard in the IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+
+ logger.info("Kill node1, backup shard should become primary");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+        // only node2 is left and it already holds the primary, so the backup stays unassigned
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Start another node, backup shard should start initializing");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+        // the backup shard is initializing again; we make sure that it recovers from the *started* primary shard in the IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node3"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java
new file mode 100644
index 0000000..897ae5f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class TenShardsOneReplicaRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(TenShardsOneReplicaRoutingTests.class);
+
+ @Test
+ public void testSingleIndexFirstStartPrimaryThenBackups() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.balance.index", 0.0f)
+ .put("cluster.routing.allocation.balance.replica", 1.0f)
+ .put("cluster.routing.allocation.balance.primary", 0.0f)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the primary shard (on node1)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+            // backup shards are initializing as well; we make sure that they recover from *started* primary shards in the IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+ }
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(10));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(10));
+
+ logger.info("Add another node and perform rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(10));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(10));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(10));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(10));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(6));
+
+ logger.info("Start the shards on node 3");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node3").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(7));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(7));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(6));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java
new file mode 100644
index 0000000..64859c1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class ThrottlingAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ThrottlingAllocationTests.class);
+
+ @Test
+ public void testPrimaryRecoveryThrottling() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 3)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start one node, do reroute, only 3 should initialize");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
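+ // 20 shard copies exist (10 primaries + 10 replicas); node_initial_primaries_recoveries=3 lets only 3 primaries recover at once on the new node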
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(17));
+
+ logger.info("start initializing, another 3 should initialize");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(14));
+
+ logger.info("start initializing, another 3 should initialize");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(6));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(11));
+
+ logger.info("start initializing, another 1 should initialize");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(9));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(10));
+
+ logger.info("start initializing, all primaries should be started");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
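+ // with only one node in the cluster the 10 replicas can never be allocated alongside their primaries, so they stay unassigned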
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(10));
+ }
+
+ @Test
+ public void testReplicaAndPrimaryRecoveryThrottling() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 3)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start one node, do reroute, only 3 should initialize");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
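+ // 10 shard copies exist (5 primaries + 5 replicas); node_initial_primaries_recoveries=3 caps the initial primary recoveries at 3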
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(7));
+
+ logger.info("start initializing, another 2 should initialize");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(5));
+
+ logger.info("start initializing, all primaries should be started");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(5));
+
+ logger.info("start another node, replicas should start being allocated");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
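+ // replica recoveries are throttled too: concurrent_recoveries=3 lets only 3 of the 5 replicas initialize on node2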
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2));
+
+ logger.info("start initializing replicas");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(8));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
+
+ logger.info("start initializing replicas, all should be started");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java
new file mode 100644
index 0000000..fd79255
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class UpdateNumberOfReplicasTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(UpdateNumberOfReplicasTests.class);
+
+ @Test
+ public void testUpdateNumberOfReplicas() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
+
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start all the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start all the replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ final String nodeHoldingPrimary = routingTable.index("test").shard(0).primaryShard().currentNodeId();
+ final String nodeHoldingReplica = routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId();
+ assertThat(nodeHoldingPrimary, not(equalTo(nodeHoldingReplica)));
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+
+
+ logger.info("add another replica");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = RoutingTable.builder(routingTable).updateNumberOfReplicas(2).build();
+ metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(2).build();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaData).build();
+
+ assertThat(clusterState.metaData().index("test").numberOfReplicas(), equalTo(2));
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(1).state(), equalTo(UNASSIGNED));
+
+ logger.info("Add another node and start the added replica");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).get(1).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
+
+ logger.info("now remove a replica");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = RoutingTable.builder(routingTable).updateNumberOfReplicas(1).build();
+ metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(1).build();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaData).build();
+
+ assertThat(clusterState.metaData().index("test").numberOfReplicas(), equalTo(1));
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
+
+ logger.info("do a reroute, should remain the same");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(false));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
new file mode 100644
index 0000000..674d849
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
@@ -0,0 +1,597 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.DiskUsage;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class DiskThresholdDeciderTests extends ElasticsearchAllocationTestCase {
+
+ @Test
+ public void diskThresholdTest() {
+ Settings diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build();
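+ // watermarks here are fractions of used disk: nodes above 70% used (low) should take no new shards, and above 80% used (high) shards should be moved away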
+
+ Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
+ usages.put("node1", new DiskUsage("node1", 100, 10)); // 90% used
+ usages.put("node2", new DiskUsage("node2", 100, 35)); // 65% used
+ usages.put("node3", new DiskUsage("node3", 100, 60)); // 40% used
+ usages.put("node4", new DiskUsage("node4", 100, 80)); // 20% used
+
+ Map<String, Long> shardSizes = new HashMap<String, Long>();
+ shardSizes.put("[test][0][p]", 10L); // 10 bytes
+ shardSizes.put("[test][0][r]", 10L);
+ final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
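+ // a fixed ClusterInfo (disk usages + shard sizes) stands in for live node stats so the decider sees deterministic numbers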
+
+ AllocationDeciders deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ ClusterInfoService cis = new ClusterInfoService() {
+ @Override
+ public ClusterInfo getClusterInfo() {
+ logger.info("--> calling fake getClusterInfo");
+ return clusterInfo;
+ }
+ };
+
+ AllocationService strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ // Primary shard should be initializing, replica should not
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that we're able to start the primary
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ // Assert that node1 didn't get any shards because its disk usage is too high
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that the replica couldn't be started since node1 doesn't have enough space
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+
+ logger.info("--> adding node3");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that the replica is initialized now that node3 is available with enough space
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that the replica couldn't be started since node1 doesn't have enough space
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+ logger.info("--> changing decider settings");
+
+ // Set the low threshold to 60 instead of 70
+ // Set the high threshold to 70 instead of 80
+ // node2 now should not have new shards allocated to it, but shards can remain
+ diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.6)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.7).build();
+
+ deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ // Shards remain started
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+ logger.info("--> changing settings again");
+
+ // Set the low threshold to 50 instead of 60
+ // Set the high threshold to 60 instead of 70
+ // node2 now should not have new shards allocated to it, and shards cannot remain
+ diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.5)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.6).build();
+
+ deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Shards remain started
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ // Shard hasn't been moved off of node2 yet because there's nowhere for it to go
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+ logger.info("--> adding node4");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // One copy stays started while the other relocates to node4 (source RELOCATING, target INITIALIZING)
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> apply INITIALIZING shards");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ // Node4 is available now, so the shard is moved off of node2
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(1));
+ }
+
+ @Test
+ public void diskThresholdWithAbsoluteSizesTest() {
+ Settings diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "30b")
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "20b").build();
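+ // absolute watermarks: nodes with less than 30 bytes free (low) should take no new shards; below 20 bytes free (high) shards should be moved away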
+
+ Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
+ usages.put("node1", new DiskUsage("node1", 100, 10)); // 90% used
+ usages.put("node2", new DiskUsage("node2", 100, 35)); // 65% used
+ usages.put("node3", new DiskUsage("node3", 100, 60)); // 40% used
+ usages.put("node4", new DiskUsage("node4", 100, 80)); // 20% used
+
+ Map<String, Long> shardSizes = new HashMap<String, Long>();
+ shardSizes.put("[test][0][p]", 10L); // 10 bytes
+ shardSizes.put("[test][0][r]", 10L);
+ final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+
+ AllocationDeciders deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ ClusterInfoService cis = new ClusterInfoService() {
+ @Override
+ public ClusterInfo getClusterInfo() {
+ logger.info("--> calling fake getClusterInfo");
+ return clusterInfo;
+ }
+ };
+
+ AllocationService strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ // Primary shard should be initializing, replica should not
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that we're able to start the primary
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ // Assert that node1 didn't get any shards because its disk usage is too high
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that the replica couldn't be started since node1 doesn't have enough space
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+
+ logger.info("--> adding node3");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that the replica is initialized now that node3 is available with enough space
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that the replica couldn't be started since node1 doesn't have enough space
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+ logger.info("--> changing decider settings");
+
+ // Set the low threshold to 60 instead of 70
+ // Set the high threshold to 70 instead of 80
+ // node2 now should not have new shards allocated to it, but shards can remain
+ diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "40b")
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "30b").build();
+
+ deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ // Shards remain started
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+ logger.info("--> changing settings again");
+
+ // Set the low threshold to 50 instead of 60
+ // Set the high threshold to 60 instead of 70
+ // node2 now should not have new shards allocated to it, and shards cannot remain
+ diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "50b")
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "40b").build();
+
+ deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Shards remain started
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ // Shard hasn't been moved off of node2 yet because there's nowhere for it to go
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+ logger.info("--> adding node4");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // One copy stays started while the other relocates to node4 (source RELOCATING, target INITIALIZING)
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> apply INITIALIZING shards");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+ // Node4 is available now, so the shard is moved off of node2
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+ assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(1));
+ }
+
+ @Test
+ public void diskThresholdWithShardSizes() {
+ Settings diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.71).build();
+
+ Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
+ usages.put("node1", new DiskUsage("node1", 100, 31)); // 69% used
+ usages.put("node2", new DiskUsage("node2", 100, 1)); // 99% used
+
+ Map<String, Long> shardSizes = new HashMap<String, Long>();
+ shardSizes.put("[test][0][p]", 10L); // 10 bytes
+ final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+
+ AllocationDeciders deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ ClusterInfoService cis = new ClusterInfoService() {
+ @Override
+ public ClusterInfo getClusterInfo() {
+ logger.info("--> calling fake getClusterInfo");
+ return clusterInfo;
+ }
+ };
+
+ AllocationService strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ logger.info("--> adding node1");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2")) // node2 is added because DiskThresholdDecider automatically ignore single-node clusters
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ // Shard can't be allocated to node1 (or node2) because it would cause too much usage
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+ // No shards are started; no node has enough free disk for allocation
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(0));
+ }
+
+ @Test
+ public void unknownDiskUsageTest() {
+ Settings diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.85).build();
+
+ Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
+ usages.put("node2", new DiskUsage("node2", 100, 50)); // 50% used
+ usages.put("node3", new DiskUsage("node3", 100, 0)); // 100% used
+
+ Map<String, Long> shardSizes = new HashMap<String, Long>();
+ shardSizes.put("[test][0][p]", 10L); // 10 bytes
+ shardSizes.put("[test][0][r]", 10L); // 10 bytes
+ final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+
+ AllocationDeciders deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
+ new HashSet<AllocationDecider>(Arrays.asList(
+ new SameShardAllocationDecider(ImmutableSettings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ ClusterInfoService cis = new ClusterInfoService() {
+ @Override
+ public ClusterInfo getClusterInfo() {
+ logger.info("--> calling fake getClusterInfo");
+ return clusterInfo;
+ }
+ };
+
+ AllocationService strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, new ShardsAllocators(), cis);
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+ logger.info("--> adding node1");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node3")) // node3 is added because DiskThresholdDecider automatically ignore single-node clusters
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ // Shard can't be allocated to node1 (or node2) because the average usage is 75% > 70%
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+ // No shards are started; neither node has enough free disk
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(0));
+ }
+
+ @Test
+ public void averageUsageUnitTest() {
+ RoutingNode rn = new RoutingNode("node1", newNode("node1"));
+ DiskThresholdDecider decider = new DiskThresholdDecider(ImmutableSettings.EMPTY);
+
+ Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
+ usages.put("node2", new DiskUsage("node2", 100, 50)); // 50% used
+ usages.put("node3", new DiskUsage("node3", 100, 0)); // 100% used
+
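+ // node1 is missing from the usages map, so averageUsage should return the mean of the known nodes: (50 + 0) / 2 = 25 bytes free of 100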
+ DiskUsage node1Usage = decider.averageUsage(rn, usages);
+ assertThat(node1Usage.getTotalBytes(), equalTo(100L));
+ assertThat(node1Usage.getFreeBytes(), equalTo(25L));
+ }
+
+ @Test
+ public void freeDiskPercentageAfterShardAssignedUnitTest() {
+ RoutingNode rn = new RoutingNode("node1", newNode("node1"));
+ DiskThresholdDecider decider = new DiskThresholdDecider(ImmutableSettings.EMPTY);
+
+ Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
+ usages.put("node2", new DiskUsage("node2", 100, 50)); // 50% used
+ usages.put("node3", new DiskUsage("node3", 100, 0)); // 100% used
+
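+ // starting from 30 bytes free of 100, assigning an 11-byte shard leaves 19 bytes free, i.e. 19.0% free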
+ Double after = decider.freeDiskPercentageAfterShardAssigned(new DiskUsage("node2", 100, 30), 11L);
+ assertThat(after, equalTo(19.0));
+ }
+
+ public void logShardStates(ClusterState state) {
+ RoutingNodes rn = state.routingNodes();
+ logger.info("--> counts: total: {}, unassigned: {}, initializing: {}, relocating: {}, started: {}",
+ rn.shards(new Predicate<MutableShardRouting>() {
+ @Override
+ public boolean apply(org.elasticsearch.cluster.routing.MutableShardRouting input) {
+ return true;
+ }
+ }).size(),
+ rn.shardsWithState(UNASSIGNED).size(),
+ rn.shardsWithState(INITIALIZING).size(),
+ rn.shardsWithState(RELOCATING).size(),
+ rn.shardsWithState(STARTED).size());
+ logger.info("--> unassigned: {}, initializing: {}, relocating: {}, started: {}",
+ rn.shardsWithState(UNASSIGNED),
+ rn.shardsWithState(INITIALIZING),
+ rn.shardsWithState(RELOCATING),
+ rn.shardsWithState(STARTED));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java
new file mode 100644
index 0000000..595b258
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class EnableAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(EnableAllocationTests.class);
+
+ @Test
+ public void testClusterEnableNone() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name())
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
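+ // allocation is disabled cluster-wide, so the reroute assigns nothing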
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ }
+
+ @Test
+ public void testClusterEnableOnlyPrimaries() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.PRIMARIES.name())
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+ }
+
+ @Test
+ public void testIndexEnableNone() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("disabled").settings(ImmutableSettings.builder()
+ .put(INDEX_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()))
+ .numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("enabled").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("disabled"))
+ .addAsNew(metaData.index("enabled"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> verify only enabled index has been routed");
+ assertThat(clusterState.readOnlyRoutingNodes().shardsWithState("enabled", STARTED).size(), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().shardsWithState("disabled", STARTED).size(), equalTo(0));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
new file mode 100644
index 0000000..505ccf2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.serialization;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class ClusterSerializationTests extends ElasticsearchAllocationTestCase {
+
+ @Test
+ public void testClusterStateSerialization() throws Exception {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).localNodeId("node1").masterNodeId("node2").build();
+
+ ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+
+ AllocationService strategy = createAllocationService();
+ clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState).routingTable()).build();
+
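+        // round-trip: serialize the full cluster state to bytes and rebuild it from the perspective of node1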
+ ClusterState serializedClusterState = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), newNode("node1"));
+
+ assertThat(serializedClusterState.routingTable().prettyPrint(), equalTo(clusterState.routingTable().prettyPrint()));
+ }
+
+
+ @Test
+ public void testRoutingTableSerialization() throws Exception {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).build();
+
+ ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+
+ AllocationService strategy = createAllocationService();
+ RoutingTable source = strategy.reroute(clusterState).routingTable();
+
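+        // write the freshly rerouted routing table to a byte stream and read it back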
+ BytesStreamOutput outStream = new BytesStreamOutput();
+ RoutingTable.Builder.writeTo(source, outStream);
+ BytesStreamInput inStream = new BytesStreamInput(outStream.bytes().toBytes(), false);
+ RoutingTable target = RoutingTable.Builder.readFrom(inStream);
+
+ assertThat(target.prettyPrint(), equalTo(source.prettyPrint()));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java b/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java
new file mode 100644
index 0000000..4504b6b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.serialization;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.containsString;
+
+/**
+ *
+ */
+public class ClusterStateToStringTests extends ElasticsearchAllocationTestCase {
+ @Test
+ public void testClusterStateSerialization() throws Exception {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test_idx").numberOfShards(10).numberOfReplicas(1))
+ .put(IndexTemplateMetaData.builder("test_template").build())
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test_idx"))
+ .build();
+
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(new DiscoveryNode("node_foo", DummyTransportAddress.INSTANCE, Version.CURRENT)).localNodeId("node_foo").masterNodeId("node_foo").build();
+
+ ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+
+ AllocationService strategy = createAllocationService();
+ clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState).routingTable()).build();
+
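+        // the string rendering should mention every index, template and node so it stays useful for diagnostics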
+ String clusterStateString = clusterState.toString();
+ assertNotNull(clusterStateString);
+
+ assertThat(clusterStateString, containsString("test_idx"));
+ assertThat(clusterStateString, containsString("test_template"));
+ assertThat(clusterStateString, containsString("node_foo"));
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java b/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java
new file mode 100644
index 0000000..f801530
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.settings;
+
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.TEST;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+
+@ClusterScope(scope = TEST)
+public class ClusterSettingsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void clusterNonExistingSettingsUpdate() {
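+        // an unknown setting is acknowledged but silently dropped from the stored transient settings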
+ String key1 = "no_idea_what_you_are_talking_about";
+ int value1 = 10;
+
+ ClusterUpdateSettingsResponse response = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(ImmutableSettings.builder().put(key1, value1).build())
+ .get();
+
+ assertAcked(response);
+ assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable());
+ }
+
+ @Test
+ public void clusterSettingsUpdateResponse() {
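+        // the response must echo each setting only under the scope (transient or persistent) it was applied to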
+ String key1 = "indices.cache.filter.size";
+ int value1 = 10;
+
+ String key2 = DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION;
+ boolean value2 = true;
+
+ Settings transientSettings1 = ImmutableSettings.builder().put(key1, value1).build();
+ Settings persistentSettings1 = ImmutableSettings.builder().put(key2, value2).build();
+
+ ClusterUpdateSettingsResponse response1 = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(transientSettings1)
+ .setPersistentSettings(persistentSettings1)
+ .execute()
+ .actionGet();
+
+ assertAcked(response1);
+ assertThat(response1.getTransientSettings().get(key1), notNullValue());
+ assertThat(response1.getTransientSettings().get(key2), nullValue());
+ assertThat(response1.getPersistentSettings().get(key1), nullValue());
+ assertThat(response1.getPersistentSettings().get(key2), notNullValue());
+
+ Settings transientSettings2 = ImmutableSettings.builder().put(key1, value1).put(key2, value2).build();
+ Settings persistentSettings2 = ImmutableSettings.EMPTY;
+
+ ClusterUpdateSettingsResponse response2 = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(transientSettings2)
+ .setPersistentSettings(persistentSettings2)
+ .execute()
+ .actionGet();
+
+ assertAcked(response2);
+ assertThat(response2.getTransientSettings().get(key1), notNullValue());
+ assertThat(response2.getTransientSettings().get(key2), notNullValue());
+ assertThat(response2.getPersistentSettings().get(key1), nullValue());
+ assertThat(response2.getPersistentSettings().get(key2), nullValue());
+
+ Settings transientSettings3 = ImmutableSettings.EMPTY;
+ Settings persistentSettings3 = ImmutableSettings.builder().put(key1, value1).put(key2, value2).build();
+
+ ClusterUpdateSettingsResponse response3 = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(transientSettings3)
+ .setPersistentSettings(persistentSettings3)
+ .execute()
+ .actionGet();
+
+ assertAcked(response3);
+ assertThat(response3.getTransientSettings().get(key1), nullValue());
+ assertThat(response3.getTransientSettings().get(key2), nullValue());
+ assertThat(response3.getPersistentSettings().get(key1), notNullValue());
+ assertThat(response3.getPersistentSettings().get(key2), notNullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java b/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java
new file mode 100644
index 0000000..e87211f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.settings;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class SettingsValidatorTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testValidators() throws Exception {
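+        // a validator returns null for an acceptable value and a non-null error message otherwise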
+ assertThat(Validator.EMPTY.validate("", "anything goes"), nullValue());
+
+ assertThat(Validator.TIME.validate("", "10m"), nullValue());
+ assertThat(Validator.TIME.validate("", "10g"), notNullValue());
+ assertThat(Validator.TIME.validate("", "bad timing"), notNullValue());
+
+ assertThat(Validator.BYTES_SIZE.validate("", "10m"), nullValue());
+ assertThat(Validator.BYTES_SIZE.validate("", "10g"), nullValue());
+ assertThat(Validator.BYTES_SIZE.validate("", "bad"), notNullValue());
+
+ assertThat(Validator.FLOAT.validate("", "10.2"), nullValue());
+ assertThat(Validator.FLOAT.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "10.2"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "0.0"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "-1.0"), notNullValue());
+ assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.DOUBLE.validate("", "10.2"), nullValue());
+ assertThat(Validator.DOUBLE.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.DOUBLE_GTE_2.validate("", "10.2"), nullValue());
+ assertThat(Validator.DOUBLE_GTE_2.validate("", "2.0"), nullValue());
+ assertThat(Validator.DOUBLE_GTE_2.validate("", "1.0"), notNullValue());
+ assertThat(Validator.DOUBLE_GTE_2.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "10.2"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "0.0"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "-1.0"), notNullValue());
+ assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.INTEGER.validate("", "10"), nullValue());
+ assertThat(Validator.INTEGER.validate("", "10.2"), notNullValue());
+
+ assertThat(Validator.INTEGER_GTE_2.validate("", "2"), nullValue());
+ assertThat(Validator.INTEGER_GTE_2.validate("", "1"), notNullValue());
+ assertThat(Validator.INTEGER_GTE_2.validate("", "0"), notNullValue());
+ assertThat(Validator.INTEGER_GTE_2.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "2"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "1"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "0"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "-1"), notNullValue());
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "10.2"), notNullValue());
+
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "2"), nullValue());
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "1"), nullValue());
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "0"), notNullValue());
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "-1"), notNullValue());
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "10.2"), notNullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java b/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java
new file mode 100644
index 0000000..2233059
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.shards;
+
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
+import org.elasticsearch.cluster.metadata.AliasAction;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ClusterScope(scope=Scope.SUITE, numNodes=2)
+public class ClusterSearchShardsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ switch(nodeOrdinal) {
+ case 1:
+ return settingsBuilder().put("node.tag", "B").build();
+ case 0:
+ return settingsBuilder().put("node.tag", "A").build();
+ }
+ return super.nodeSettings(nodeOrdinal);
+ }
+
+ @Test
+ public void testSingleShardAllocation() throws Exception {
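+        // a single shard pinned to the tag=A node: expect exactly one group with one shard on one node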
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.number_of_shards", "1").put("index.number_of_replicas", 0).put("index.routing.allocation.include.tag", "A")).execute().actionGet();
+ ensureGreen();
+ ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(1));
+ assertThat(response.getGroups()[0].getIndex(), equalTo("test"));
+ assertThat(response.getGroups()[0].getShardId(), equalTo(0));
+ assertThat(response.getGroups()[0].getShards().length, equalTo(1));
+ assertThat(response.getNodes().length, equalTo(1));
+ assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId()));
+
+ response = client().admin().cluster().prepareSearchShards("test").setRouting("A").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(1));
+ assertThat(response.getGroups()[0].getIndex(), equalTo("test"));
+ assertThat(response.getGroups()[0].getShardId(), equalTo(0));
+ assertThat(response.getGroups()[0].getShards().length, equalTo(1));
+ assertThat(response.getNodes().length, equalTo(1));
+ assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId()));
+
+ }
+
+ @Test
+ public void testMultipleShardsSingleNodeAllocation() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.number_of_shards", "4").put("index.number_of_replicas", 0).put("index.routing.allocation.include.tag", "A")).execute().actionGet();
+ ensureGreen();
+
+ ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(4));
+ assertThat(response.getGroups()[0].getIndex(), equalTo("test"));
+ assertThat(response.getNodes().length, equalTo(1));
+ assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId()));
+
+ response = client().admin().cluster().prepareSearchShards("test").setRouting("ABC").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(1));
+
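+        // the _shards:2 preference restricts the request to the shard with id 2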
+ response = client().admin().cluster().prepareSearchShards("test").setPreference("_shards:2").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(1));
+ assertThat(response.getGroups()[0].getShardId(), equalTo(2));
+ }
+
+ @Test
+ public void testMultipleIndicesAllocation() throws Exception {
+ client().admin().indices().prepareCreate("test1").setSettings(settingsBuilder()
+ .put("index.number_of_shards", "4").put("index.number_of_replicas", 1)).execute().actionGet();
+ client().admin().indices().prepareCreate("test2").setSettings(settingsBuilder()
+ .put("index.number_of_shards", "4").put("index.number_of_replicas", 1)).execute().actionGet();
+ client().admin().indices().prepareAliases()
+ .addAliasAction(AliasAction.newAddAliasAction("test1", "routing_alias").routing("ABC"))
+ .addAliasAction(AliasAction.newAddAliasAction("test2", "routing_alias").routing("EFG"))
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("routing_alias").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(2));
+ assertThat(response.getGroups()[0].getShards().length, equalTo(2));
+ assertThat(response.getGroups()[1].getShards().length, equalTo(2));
+ boolean seenTest1 = false;
+ boolean seenTest2 = false;
+ for (ClusterSearchShardsGroup group : response.getGroups()) {
+ if (group.getIndex().equals("test1")) {
+ seenTest1 = true;
+ assertThat(group.getShards().length, equalTo(2));
+ } else if (group.getIndex().equals("test2")) {
+ seenTest2 = true;
+ assertThat(group.getShards().length, equalTo(2));
+ } else {
+ fail();
+ }
+ }
+ assertThat(seenTest1, equalTo(true));
+ assertThat(seenTest2, equalTo(true));
+ assertThat(response.getNodes().length, equalTo(2));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java
new file mode 100644
index 0000000..137a8bc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java
@@ -0,0 +1,340 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.structure;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
+import org.elasticsearch.cluster.routing.operation.hash.djb.DjbHashFunction;
+import org.elasticsearch.cluster.routing.operation.plain.PlainOperationRouting;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+public class RoutingIteratorTests extends ElasticsearchAllocationTestCase {
+
+ @Test
+ public void testEmptyIterator() {
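+        // the rotation seed is irrelevant here: iterating an empty shard list is always immediately exhausted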
+        ShardShuffler shuffler = new RotationShardShuffler(0);
+        for (int i = 0; i < 4; i++) {
+            ShardIterator shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.<ShardRouting>of()));
+            assertThat(shardIterator.remaining(), equalTo(0));
+            assertThat(shardIterator.firstOrNull(), nullValue());
+            assertThat(shardIterator.remaining(), equalTo(0));
+            assertThat(shardIterator.nextOrNull(), nullValue());
+            assertThat(shardIterator.remaining(), equalTo(0));
+            assertThat(shardIterator.nextOrNull(), nullValue());
+            assertThat(shardIterator.remaining(), equalTo(0));
+        }
+ }
+
+ @Test
+ public void testIterator1() {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(2))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .build();
+
+ ShardIterator shardIterator = routingTable.index("test1").shard(0).shardsIt(0);
+ assertThat(shardIterator.size(), equalTo(3));
+ ShardRouting firstRouting = shardIterator.firstOrNull();
+ assertThat(shardIterator.firstOrNull(), notNullValue());
+ assertThat(shardIterator.remaining(), equalTo(3));
+ assertThat(shardIterator.firstOrNull(), sameInstance(shardIterator.firstOrNull()));
+ assertThat(shardIterator.remaining(), equalTo(3));
+ ShardRouting shardRouting1 = shardIterator.nextOrNull();
+ assertThat(firstRouting, sameInstance(shardRouting1));
+ assertThat(shardRouting1, notNullValue());
+ assertThat(shardIterator.remaining(), equalTo(2));
+ ShardRouting shardRouting2 = shardIterator.nextOrNull();
+ assertThat(shardRouting2, notNullValue());
+ assertThat(shardIterator.remaining(), equalTo(1));
+ assertThat(shardRouting2, not(sameInstance(shardRouting1)));
+ ShardRouting shardRouting3 = shardIterator.nextOrNull();
+ assertThat(shardRouting3, notNullValue());
+ assertThat(shardRouting3, not(sameInstance(shardRouting1)));
+ assertThat(shardRouting3, not(sameInstance(shardRouting2)));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ }
+
+ @Test
+ public void testIterator2() {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ShardIterator shardIterator = routingTable.index("test1").shard(0).shardsIt(0);
+ assertThat(shardIterator.size(), equalTo(2));
+ ShardRouting firstRouting = shardIterator.firstOrNull();
+ assertThat(shardIterator.firstOrNull(), notNullValue());
+ assertThat(shardIterator.remaining(), equalTo(2));
+ assertThat(shardIterator.firstOrNull(), sameInstance(shardIterator.firstOrNull()));
+ assertThat(shardIterator.remaining(), equalTo(2));
+ ShardRouting shardRouting1 = shardIterator.nextOrNull();
+ assertThat(shardRouting1, sameInstance(firstRouting));
+ assertThat(shardRouting1, notNullValue());
+ assertThat(shardIterator.remaining(), equalTo(1));
+ ShardRouting shardRouting2 = shardIterator.nextOrNull();
+ assertThat(shardRouting2, notNullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardRouting2, not(sameInstance(shardRouting1)));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.remaining(), equalTo(0));
+
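+        // shardsIt(seed) appears to rotate the shard list by the seed: with two copies, even seeds repeat one order and odd seeds the other (asserted below)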
+ shardIterator = routingTable.index("test1").shard(0).shardsIt(1);
+ assertThat(shardIterator.size(), equalTo(2));
+ firstRouting = shardIterator.firstOrNull();
+ assertThat(shardIterator.firstOrNull(), notNullValue());
+ assertThat(shardIterator.firstOrNull(), sameInstance(shardIterator.firstOrNull()));
+ ShardRouting shardRouting3 = shardIterator.nextOrNull();
+ assertThat(firstRouting, sameInstance(shardRouting3));
+        assertThat(shardRouting3, notNullValue());
+        ShardRouting shardRouting4 = shardIterator.nextOrNull();
+        assertThat(shardRouting4, notNullValue());
+        assertThat(shardRouting4, not(sameInstance(shardRouting3)));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.nextOrNull(), nullValue());
+
+ assertThat(shardRouting1, not(sameInstance(shardRouting3)));
+ assertThat(shardRouting2, not(sameInstance(shardRouting4)));
+ assertThat(shardRouting1, sameInstance(shardRouting4));
+ assertThat(shardRouting2, sameInstance(shardRouting3));
+
+ shardIterator = routingTable.index("test1").shard(0).shardsIt(2);
+ assertThat(shardIterator.size(), equalTo(2));
+ firstRouting = shardIterator.firstOrNull();
+ assertThat(shardIterator.firstOrNull(), notNullValue());
+ assertThat(shardIterator.firstOrNull(), sameInstance(shardIterator.firstOrNull()));
+ ShardRouting shardRouting5 = shardIterator.nextOrNull();
+ assertThat(shardRouting5, sameInstance(firstRouting));
+ assertThat(shardRouting5, notNullValue());
+ ShardRouting shardRouting6 = shardIterator.nextOrNull();
+ assertThat(shardRouting6, notNullValue());
+ assertThat(shardRouting6, not(sameInstance(shardRouting5)));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.nextOrNull(), nullValue());
+
+ assertThat(shardRouting5, sameInstance(shardRouting1));
+ assertThat(shardRouting6, sameInstance(shardRouting2));
+
+ shardIterator = routingTable.index("test1").shard(0).shardsIt(3);
+ assertThat(shardIterator.size(), equalTo(2));
+ firstRouting = shardIterator.firstOrNull();
+ assertThat(shardIterator.firstOrNull(), notNullValue());
+ assertThat(shardIterator.firstOrNull(), sameInstance(shardIterator.firstOrNull()));
+ ShardRouting shardRouting7 = shardIterator.nextOrNull();
+ assertThat(shardRouting7, sameInstance(firstRouting));
+ assertThat(shardRouting7, notNullValue());
+ ShardRouting shardRouting8 = shardIterator.nextOrNull();
+ assertThat(shardRouting8, notNullValue());
+ assertThat(shardRouting8, not(sameInstance(shardRouting7)));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.nextOrNull(), nullValue());
+
+ assertThat(shardRouting7, sameInstance(shardRouting3));
+ assertThat(shardRouting8, sameInstance(shardRouting4));
+
+ shardIterator = routingTable.index("test1").shard(0).shardsIt(4);
+ assertThat(shardIterator.size(), equalTo(2));
+ firstRouting = shardIterator.firstOrNull();
+ assertThat(shardIterator.firstOrNull(), notNullValue());
+ assertThat(shardIterator.firstOrNull(), sameInstance(shardIterator.firstOrNull()));
+ ShardRouting shardRouting9 = shardIterator.nextOrNull();
+ assertThat(shardRouting9, sameInstance(firstRouting));
+ assertThat(shardRouting9, notNullValue());
+ ShardRouting shardRouting10 = shardIterator.nextOrNull();
+ assertThat(shardRouting10, notNullValue());
+ assertThat(shardRouting10, not(sameInstance(shardRouting9)));
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardIterator.nextOrNull(), nullValue());
+
+ assertThat(shardRouting9, sameInstance(shardRouting5));
+ assertThat(shardRouting10, sameInstance(shardRouting6));
+ }
+
+ @Test
+ public void testRandomRouting() {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ShardIterator shardIterator = routingTable.index("test1").shard(0).shardsRandomIt();
+ ShardRouting shardRouting1 = shardIterator.nextOrNull();
+ assertThat(shardRouting1, notNullValue());
+ assertThat(shardIterator.nextOrNull(), notNullValue());
+ assertThat(shardIterator.nextOrNull(), nullValue());
+
+ shardIterator = routingTable.index("test1").shard(0).shardsRandomIt();
+ ShardRouting shardRouting2 = shardIterator.nextOrNull();
+ assertThat(shardRouting2, notNullValue());
+ ShardRouting shardRouting3 = shardIterator.nextOrNull();
+ assertThat(shardRouting3, notNullValue());
+ assertThat(shardIterator.nextOrNull(), nullValue());
+ assertThat(shardRouting1, not(sameInstance(shardRouting2)));
+ assertThat(shardRouting1, sameInstance(shardRouting3));
+ }
+
+ @Test
+ public void testAttributePreferenceRouting() {
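+        // shards on nodes that share the local node's rack_id should be returned before the rest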
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.allow_rebalance", "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id,zone")
+ .build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "rack_1", "zone", "zone1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "rack_2", "zone", "zone2")))
+ .localNodeId("node1")
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // after all are started, check routing iteration
+ ShardIterator shardIterator = clusterState.routingTable().index("test").shard(0).preferAttributesActiveInitializingShardsIt(new String[]{"rack_id"}, clusterState.nodes());
+ ShardRouting shardRouting = shardIterator.nextOrNull();
+ assertThat(shardRouting, notNullValue());
+ assertThat(shardRouting.currentNodeId(), equalTo("node1"));
+ shardRouting = shardIterator.nextOrNull();
+ assertThat(shardRouting, notNullValue());
+ assertThat(shardRouting.currentNodeId(), equalTo("node2"));
+
+ shardIterator = clusterState.routingTable().index("test").shard(0).preferAttributesActiveInitializingShardsIt(new String[]{"rack_id"}, clusterState.nodes());
+ shardRouting = shardIterator.nextOrNull();
+ assertThat(shardRouting, notNullValue());
+ assertThat(shardRouting.currentNodeId(), equalTo("node1"));
+ shardRouting = shardIterator.nextOrNull();
+ assertThat(shardRouting, notNullValue());
+ assertThat(shardRouting.currentNodeId(), equalTo("node2"));
+ }
+
+
+ @Test
+ public void testShardsAndPreferNodeRouting() {
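+        // _shards narrows the lookup to one shard id; adding _prefer_node then pins it to a node instead of alternating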
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ .localNodeId("node1")
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ PlainOperationRouting operationRouting = new PlainOperationRouting(ImmutableSettings.Builder.EMPTY_SETTINGS, new DjbHashFunction(), new AwarenessAllocationDecider());
+
+ GroupShardsIterator shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0");
+ assertThat(shardIterators.size(), equalTo(1));
+ assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+
+ shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:1");
+ assertThat(shardIterators.size(), equalTo(1));
+ assertThat(shardIterators.iterator().next().shardId().id(), equalTo(1));
+
+        // check node preference: first query twice without a preferred node, to see that the selected node switches between rounds
+ shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0;");
+ assertThat(shardIterators.size(), equalTo(1));
+ assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+ String firstRoundNodeId = shardIterators.iterator().next().nextOrNull().currentNodeId();
+
+ shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0");
+ assertThat(shardIterators.size(), equalTo(1));
+ assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+ assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), not(equalTo(firstRoundNodeId)));
+
+ shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0;_prefer_node:node1");
+ assertThat(shardIterators.size(), equalTo(1));
+ assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+ assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), equalTo("node1"));
+
+ shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0;_prefer_node:node1");
+ assertThat(shardIterators.size(), equalTo(1));
+ assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+ assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), equalTo("node1"));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/codecs/CodecTests.java b/src/test/java/org/elasticsearch/codecs/CodecTests.java
new file mode 100644
index 0000000..02c6f69
--- /dev/null
+++ b/src/test/java/org/elasticsearch/codecs/CodecTests.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.codecs;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.sort.FieldSortBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.endsWith;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class CodecTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testFieldsWithCustomPostingsFormat() throws Exception {
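+        // field1 uses the custom "test1" postings format (pulsing) and is indexed without positions, so phrase queries on it must fail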
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
+ .field("postings_format", "test1").field("index_options", "docs").field("type", "string").endObject().endObject().endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("index.codec.postings_format.test1.type", "pulsing")
+ ).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox").setRefresh(true).execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("field2", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).execute().actionGet();
+        assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
+        try {
+            client().prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).execute().actionGet();
+            fail("phrase query on field1 should have failed: the field is indexed without positions");
+        } catch (SearchPhaseExecutionException e) {
+            assertThat(e.getMessage(), endsWith("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery (term=quick)]; }"));
+        }
+ }
+
+ @Test
+ public void testIndexingWithSimpleTextCodec() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("index.codec", "SimpleText")
+ ).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox").setRefresh(true).execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("field2", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).execute().actionGet();
+        assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
+        try {
+            client().prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).execute().actionGet();
+            fail("phrase query on field1 should have failed: the field is indexed without positions");
+        } catch (SearchPhaseExecutionException e) {
+            assertThat(e.getMessage(), endsWith("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery (term=quick)]; }"));
+        }
+ }
+
+ @Test
+ public void testCustomDocValuesFormat() throws IOException {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+
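+        // _version names the built-in "disk" doc values format directly; "field" goes through the custom "dvf" alias defined in the index settings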
+ client().admin().indices().prepareCreate("test")
+ .addMapping("test", jsonBuilder().startObject().startObject("test")
+ .startObject("_version").field("doc_values_format", "disk").endObject()
+ .startObject("properties").startObject("field").field("type", "long").field("doc_values_format", "dvf").endObject().endObject()
+ .endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.codec.doc_values_format.dvf.type", "disk")
+                        .build()).execute().actionGet();
+
+ for (int i = 10; i >= 0; --i) {
+ client().prepareIndex("test", "test", Integer.toString(i)).setSource("field", randomLong()).setRefresh(i == 0 || rarely()).execute().actionGet();
+ }
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).addSort(new FieldSortBuilder("field")).execute().actionGet();
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/BooleansTests.java b/src/test/java/org/elasticsearch/common/BooleansTests.java
new file mode 100644
index 0000000..7440d8c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/BooleansTests.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+public class BooleansTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testIsBoolean() {
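+        // Booleans.isBoolean inspects a slice of the char array: the offset and length pick the token out of the surrounding text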
+ String[] booleans = new String[]{"true", "false", "on", "off", "yes", "no", "0", "1"};
+ String[] notBooleans = new String[]{"11", "00", "sdfsdfsf", "F", "T"};
+
+ for (String b : booleans) {
+ String t = "prefix" + b + "suffix";
+ assertThat("failed to recognize [" + b + "] as boolean", Booleans.isBoolean(t.toCharArray(), "prefix".length(), b.length()), Matchers.equalTo(true));
+ }
+
+ for (String nb : notBooleans) {
+ String t = "prefix" + nb + "suffix";
+ assertThat("recognized [" + nb + "] as boolean", Booleans.isBoolean(t.toCharArray(), "prefix".length(), nb.length()), Matchers.equalTo(false));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/ParseFieldTests.java b/src/test/java/org/elasticsearch/common/ParseFieldTests.java
new file mode 100644
index 0000000..d1dca3a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/ParseFieldTests.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.EnumSet;
+
+import static org.hamcrest.CoreMatchers.*;
+
+public class ParseFieldTests extends ElasticsearchTestCase {
+
+ public void testParse() {
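+        // lenient matching (no flags) accepts the canonical names, and deprecated ones only when declared via withDeprecation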
+ String[] values = new String[]{"foo_bar", "fooBar"};
+ ParseField field = new ParseField(randomFrom(values));
+ String[] deprecated = new String[]{"barFoo", "bar_foo"};
+        ParseField withDeprecations = field.withDeprecation("Foobar", randomFrom(deprecated));
+        assertThat(field, not(sameInstance(withDeprecations)));
+        assertThat(field.match(randomFrom(values), ParseField.EMPTY_FLAGS), is(true));
+        assertThat(field.match("foo bar", ParseField.EMPTY_FLAGS), is(false));
+        assertThat(field.match(randomFrom(deprecated), ParseField.EMPTY_FLAGS), is(false));
+        assertThat(field.match("barFoo", ParseField.EMPTY_FLAGS), is(false));
+
+
+        assertThat(withDeprecations.match(randomFrom(values), ParseField.EMPTY_FLAGS), is(true));
+        assertThat(withDeprecations.match("foo bar", ParseField.EMPTY_FLAGS), is(false));
+        assertThat(withDeprecations.match(randomFrom(deprecated), ParseField.EMPTY_FLAGS), is(true));
+        assertThat(withDeprecations.match("barFoo", ParseField.EMPTY_FLAGS), is(true));
+
+ // now with strict mode
+ EnumSet<ParseField.Flag> flags = EnumSet.of(ParseField.Flag.STRICT);
+ assertThat(field.match(randomFrom(values), flags), is(true));
+ assertThat(field.match("foo bar", flags), is(false));
+ assertThat(field.match(randomFrom(deprecated), flags), is(false));
+ assertThat(field.match("barFoo", flags), is(false));
+
+
+        assertThat(withDeprecations.match(randomFrom(values), flags), is(true));
+        assertThat(withDeprecations.match("foo bar", flags), is(false));
+        try {
+            withDeprecations.match(randomFrom(deprecated), flags);
+            fail();
+        } catch (ElasticsearchIllegalArgumentException ex) {
+            // expected: strict mode rejects deprecated names
+        }
+
+        try {
+            withDeprecations.match("barFoo", flags);
+            fail();
+        } catch (ElasticsearchIllegalArgumentException ex) {
+            // expected: strict mode rejects deprecated names
+        }
+
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/StringsTests.java b/src/test/java/org/elasticsearch/common/StringsTests.java
new file mode 100644
index 0000000..e6f75aa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/StringsTests.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+public class StringsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testToCamelCase() {
+ assertEquals("foo", Strings.toCamelCase("foo"));
+ assertEquals("fooBar", Strings.toCamelCase("fooBar"));
+ assertEquals("FooBar", Strings.toCamelCase("FooBar"));
+ assertEquals("fooBar", Strings.toCamelCase("foo_bar"));
+ assertEquals("fooBarFooBar", Strings.toCamelCase("foo_bar_foo_bar"));
+ assertEquals("fooBar", Strings.toCamelCase("foo_bar_"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/TableTests.java b/src/test/java/org/elasticsearch/common/TableTests.java
new file mode 100644
index 0000000..919e1c4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/TableTests.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+
+public class TableTests extends ElasticsearchTestCase {
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnStartRowWithoutHeader() {
+ Table table = new Table();
+ table.startRow();
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnEndHeadersWithoutStart() {
+ Table table = new Table();
+ table.endHeaders();
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnAddCellWithoutHeader() {
+ Table table = new Table();
+ table.addCell("error");
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnAddCellWithoutRow() {
+ Table table = this.getTableWithHeaders();
+ table.addCell("error");
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnEndRowWithoutStart() {
+ Table table = this.getTableWithHeaders();
+ table.endRow();
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnLessCellsThanDeclared() {
+ Table table = this.getTableWithHeaders();
+ table.startRow();
+ table.addCell("foo");
+ table.endRow(true);
+ }
+
+ @Test
+ public void testOnLessCellsThanDeclaredUnchecked() {
+ Table table = this.getTableWithHeaders();
+ table.startRow();
+ table.addCell("foo");
+ table.endRow(false);
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void testFailOnMoreCellsThanDeclared() {
+ Table table = this.getTableWithHeaders();
+ table.startRow();
+ table.addCell("foo");
+ table.addCell("bar");
+ table.addCell("foobar");
+ }
+
+ @Test
+ public void testSimple() {
+ Table table = this.getTableWithHeaders();
+ table.startRow();
+ table.addCell("foo1");
+ table.addCell("bar1");
+ table.endRow();
+ table.startRow();
+ table.addCell("foo2");
+ table.addCell("bar2");
+ table.endRow();
+
+ // Check headers
+ List<Table.Cell> headers = table.getHeaders();
+ assertEquals(2, headers.size());
+ assertEquals("foo", headers.get(0).value.toString());
+ assertEquals(2, headers.get(0).attr.size());
+ assertEquals("f", headers.get(0).attr.get("alias"));
+ assertEquals("foo", headers.get(0).attr.get("desc"));
+ assertEquals("bar", headers.get(1).value.toString());
+ assertEquals(2, headers.get(1).attr.size());
+ assertEquals("b", headers.get(1).attr.get("alias"));
+ assertEquals("bar", headers.get(1).attr.get("desc"));
+
+ // Check rows
+ List<List<Table.Cell>> rows = table.getRows();
+ assertEquals(2, rows.size());
+ List<Table.Cell> row = rows.get(0);
+ assertEquals("foo1", row.get(0).value.toString());
+ assertEquals("bar1", row.get(1).value.toString());
+ row = rows.get(1);
+ assertEquals("foo2", row.get(0).value.toString());
+ assertEquals("bar2", row.get(1).value.toString());
+
+ // Check getAsMap
+ Map<String, List<Table.Cell>> map = table.getAsMap();
+ assertEquals(2, map.size());
+ row = map.get("foo");
+ assertEquals("foo1", row.get(0).value.toString());
+ assertEquals("foo2", row.get(1).value.toString());
+ row = map.get("bar");
+ assertEquals("bar1", row.get(0).value.toString());
+ assertEquals("bar2", row.get(1).value.toString());
+
+ // Check getHeaderMap
+ Map<String, Table.Cell> headerMap = table.getHeaderMap();
+ assertEquals(2, headerMap.size());
+ Table.Cell cell = headerMap.get("foo");
+ assertEquals("foo", cell.value.toString());
+ cell = headerMap.get("bar");
+ assertEquals("bar", cell.value.toString());
+
+ // Check findHeaderByName
+ cell = table.findHeaderByName("foo");
+ assertEquals("foo", cell.value.toString());
+ cell = table.findHeaderByName("missing");
+ assertNull(cell);
+ }
+
+ private Table getTableWithHeaders() {
+ Table table = new Table();
+ table.startHeaders();
+ table.addCell("foo", "alias:f;desc:foo");
+ table.addCell("bar", "alias:b;desc:bar");
+ table.endHeaders();
+ return table;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java b/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java
new file mode 100644
index 0000000..5565cd4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.breaker;
+
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the Memory Aggregating Circuit Breaker
+ */
+public class MemoryCircuitBreakerTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testThreadedUpdatesToBreaker() throws Exception {
+ final int NUM_THREADS = 5;
+ final int BYTES_PER_THREAD = 1000;
+ final Thread[] threads = new Thread[NUM_THREADS];
+ final AtomicBoolean tripped = new AtomicBoolean(false);
+ final AtomicReference<Throwable> lastException = new AtomicReference<Throwable>(null);
+
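+ // the limit is one byte short of the total the threads will add, so exactly one increment must trip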
+ final MemoryCircuitBreaker breaker = new MemoryCircuitBreaker(new ByteSizeValue((BYTES_PER_THREAD * NUM_THREADS) - 1), 1.0, logger);
+
+ for (int i = 0; i < NUM_THREADS; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int j = 0; j < BYTES_PER_THREAD; j++) {
+ try {
+ breaker.addEstimateBytesAndMaybeBreak(1L);
+ } catch (CircuitBreakingException e) {
+ if (tripped.get()) {
+ assertThat("tripped too many times", true, equalTo(false));
+ } else {
+ assertThat(tripped.compareAndSet(false, true), equalTo(true));
+ }
+ } catch (Throwable e2) {
+ lastException.set(e2);
+ }
+ }
+ }
+ });
+
+ threads[i].start();
+ }
+
+ for (Thread t : threads) {
+ t.join();
+ }
+
+ assertThat("no other exceptions were thrown", lastException.get(), equalTo(null));
+ assertThat("breaker was tripped exactly once", tripped.get(), equalTo(true));
+ }
+
+ @Test
+ public void testConstantFactor() throws Exception {
+ final MemoryCircuitBreaker breaker = new MemoryCircuitBreaker(new ByteSizeValue(15), 1.6, logger);
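+ // the breaker trips once used * 1.6 (the overhead constant) exceeds the 15 byte limit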
+
+ // add only 7 bytes
+ breaker.addWithoutBreaking(7);
+
+ try {
+ // (7 + 3) * 1.6 = 16 exceeds the 15 byte limit, so this trips the breaker and the bytes are not added
+ breaker.addEstimateBytesAndMaybeBreak(3);
+ fail("should never reach this");
+ } catch (CircuitBreakingException cbe) {
+ }
+
+ // (7 + 2) * 1.6 = 14.4 stays under the limit, so this shouldn't throw an exception
+ breaker.addEstimateBytesAndMaybeBreak(2);
+
+ assertThat(breaker.getUsed(), equalTo(9L));
+
+ // adding 3 more bytes unchecked (now at 12, and 12 * 1.6 = 19.2 is already over the limit)
+ breaker.addWithoutBreaking(3);
+
+ try {
+ // Adding no bytes still breaks
+ breaker.addEstimateBytesAndMaybeBreak(0);
+ fail("should never reach this");
+ } catch (CircuitBreakingException cbe) {
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/collect/Iterators2Tests.java b/src/test/java/org/elasticsearch/common/collect/Iterators2Tests.java
new file mode 100644
index 0000000..65aa51c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/collect/Iterators2Tests.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Ordering;
+import com.google.common.collect.Sets;
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Iterator;
+import java.util.List;
+
+public class Iterators2Tests extends ElasticsearchTestCase {
+
+ public void testDeduplicateSorted() {
+ final List<String> list = Lists.newArrayList();
+ for (int i = randomInt(100); i >= 0; --i) {
+ final int frequency = randomIntBetween(1, 10);
+ final String s = randomAsciiOfLength(randomIntBetween(2, 20));
+ for (int j = 0; j < frequency; ++j) {
+ list.add(s);
+ }
+ }
+ CollectionUtil.introSort(list);
+ final List<String> deduplicated = Lists.newArrayList();
+ for (Iterator<String> it = Iterators2.deduplicateSorted(list.iterator(), Ordering.natural()); it.hasNext(); ) {
+ deduplicated.add(it.next());
+ }
+ assertEquals(Lists.newArrayList(Sets.newTreeSet(list)), deduplicated);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/compress/CompressedStringTests.java b/src/test/java/org/elasticsearch/common/compress/CompressedStringTests.java
new file mode 100644
index 0000000..9b63cbc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/compress/CompressedStringTests.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+
+/**
+ *
+ */
+public class CompressedStringTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleTestsLZF() throws IOException {
+ simpleTests("lzf");
+ }
+
+ public void simpleTests(String compressor) throws IOException {
+ CompressorFactory.configure(ImmutableSettings.settingsBuilder().put("compress.default.type", compressor).build());
+ String str = "this is a simple string";
+ CompressedString cstr = new CompressedString(str);
+ assertThat(cstr.string(), equalTo(str));
+ assertThat(new CompressedString(str), equalTo(cstr));
+
+ String str2 = "this is a simple string 2";
+ CompressedString cstr2 = new CompressedString(str2);
+ assertThat(cstr2.string(), not(equalTo(str)));
+ assertThat(new CompressedString(str2), not(equalTo(cstr)));
+ assertThat(new CompressedString(str2), equalTo(cstr2));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java b/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java
new file mode 100644
index 0000000..55ff0bd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.geo;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+
+
+/**
+ * Tests for {@link GeoHashUtils}
+ */
+public class GeoHashTests extends ElasticsearchTestCase {
+
+
+ @Test
+ public void testGeohashAsLongRoutines() {
+
+ // Ensure that, for all points at all supported levels of precision,
+ // the long encoding of a geohash is compatible with its
+ // String based counterpart
+ for (double lat = -90; lat < 90; lat++) {
+ for (double lng = -180; lng < 180; lng++) {
+ for (int p = 1; p <= 12; p++) {
+ long geoAsLong = GeoHashUtils.encodeAsLong(lat, lng, p);
+ String geohash = GeoHashUtils.encode(lat, lng, p);
+
+ String geohashFromLong = GeoHashUtils.toString(geoAsLong);
+ assertEquals(geohash, geohashFromLong);
+ GeoPoint pos = GeoHashUtils.decode(geohash);
+ GeoPoint pos2 = GeoHashUtils.decode(geoAsLong);
+ assertEquals(pos, pos2);
+ }
+ }
+ }
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java b/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java
new file mode 100644
index 0000000..31b2900
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo;
+
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.jts.JtsGeometry;
+import com.spatial4j.core.shape.jts.JtsPoint;
+import com.vividsolutions.jts.geom.*;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+
+/**
+ * Tests for {@link GeoJSONShapeParser}
+ */
+public class GeoJSONShapeParserTests extends ElasticsearchTestCase {
+
+ private final static GeometryFactory GEOMETRY_FACTORY = new GeometryFactory();
+
+ @Test
+ public void testParse_simplePoint() throws IOException {
+ String pointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Point")
+ .startArray("coordinates").value(100.0).value(0.0).endArray()
+ .endObject().string();
+
+ Point expected = GEOMETRY_FACTORY.createPoint(new Coordinate(100.0, 0.0));
+ assertGeometryEquals(new JtsPoint(expected, ShapeBuilder.SPATIAL_CONTEXT), pointGeoJson);
+ }
+
+ @Test
+ public void testParse_lineString() throws IOException {
+ String lineGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "LineString")
+ .startArray("coordinates")
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .endArray()
+ .endObject().string();
+
+ List<Coordinate> lineCoordinates = new ArrayList<Coordinate>();
+ lineCoordinates.add(new Coordinate(100, 0));
+ lineCoordinates.add(new Coordinate(101, 1));
+
+ LineString expected = GEOMETRY_FACTORY.createLineString(
+ lineCoordinates.toArray(new Coordinate[lineCoordinates.size()]));
+ assertGeometryEquals(new JtsGeometry(expected, ShapeBuilder.SPATIAL_CONTEXT, false), lineGeoJson);
+ }
+
+ @Test
+ public void testParse_polygonNoHoles() throws IOException {
+ String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+ .startArray("coordinates")
+ .startArray()
+ .startArray().value(100.0).value(1.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .startArray().value(101.0).value(0.0).endArray()
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(100.0).value(1.0).endArray()
+ .endArray()
+ .endArray()
+ .endObject().string();
+
+ List<Coordinate> shellCoordinates = new ArrayList<Coordinate>();
+ shellCoordinates.add(new Coordinate(100, 0));
+ shellCoordinates.add(new Coordinate(101, 0));
+ shellCoordinates.add(new Coordinate(101, 1));
+ shellCoordinates.add(new Coordinate(100, 1));
+ shellCoordinates.add(new Coordinate(100, 0));
+
+ LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
+ Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, null);
+ assertGeometryEquals(new JtsGeometry(expected, ShapeBuilder.SPATIAL_CONTEXT, false), polygonGeoJson);
+ }
+
+ @Test
+ public void testParse_polygonWithHole() throws IOException {
+ String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+ .startArray("coordinates")
+ .startArray()
+ .startArray().value(100.0).value(1.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .startArray().value(101.0).value(0.0).endArray()
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(100.0).value(1.0).endArray()
+ .endArray()
+ .startArray()
+ .startArray().value(100.2).value(0.8).endArray()
+ .startArray().value(100.2).value(0.2).endArray()
+ .startArray().value(100.8).value(0.2).endArray()
+ .startArray().value(100.8).value(0.8).endArray()
+ .startArray().value(100.2).value(0.8).endArray()
+ .endArray()
+ .endArray()
+ .endObject().string();
+
+ List<Coordinate> shellCoordinates = new ArrayList<Coordinate>();
+ shellCoordinates.add(new Coordinate(100, 0));
+ shellCoordinates.add(new Coordinate(101, 0));
+ shellCoordinates.add(new Coordinate(101, 1));
+ shellCoordinates.add(new Coordinate(100, 1));
+ shellCoordinates.add(new Coordinate(100, 0));
+
+ List<Coordinate> holeCoordinates = new ArrayList<Coordinate>();
+ holeCoordinates.add(new Coordinate(100.2, 0.2));
+ holeCoordinates.add(new Coordinate(100.8, 0.2));
+ holeCoordinates.add(new Coordinate(100.8, 0.8));
+ holeCoordinates.add(new Coordinate(100.2, 0.8));
+ holeCoordinates.add(new Coordinate(100.2, 0.2));
+
+ LinearRing shell = GEOMETRY_FACTORY.createLinearRing(
+ shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
+ LinearRing[] holes = new LinearRing[1];
+ holes[0] = GEOMETRY_FACTORY.createLinearRing(
+ holeCoordinates.toArray(new Coordinate[holeCoordinates.size()]));
+ Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, holes);
+ assertGeometryEquals(new JtsGeometry(expected, ShapeBuilder.SPATIAL_CONTEXT, false), polygonGeoJson);
+ }
+
+ @Test
+ public void testParse_multiPoint() throws IOException {
+ String multiPointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPoint")
+ .startArray("coordinates")
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .endArray()
+ .endObject().string();
+
+ List<Coordinate> multiPointCoordinates = new ArrayList<Coordinate>();
+ multiPointCoordinates.add(new Coordinate(100, 0));
+ multiPointCoordinates.add(new Coordinate(101, 1));
+
+ MultiPoint expected = GEOMETRY_FACTORY.createMultiPoint(
+ multiPointCoordinates.toArray(new Coordinate[multiPointCoordinates.size()]));
+ assertGeometryEquals(new JtsGeometry(expected, ShapeBuilder.SPATIAL_CONTEXT, false), multiPointGeoJson);
+ }
+
+ @Test
+ public void testParse_multiPolygon() throws IOException {
+ String multiPolygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPolygon")
+ .startArray("coordinates")
+ .startArray()
+ .startArray()
+ .startArray().value(102.0).value(2.0).endArray()
+ .startArray().value(103.0).value(2.0).endArray()
+ .startArray().value(103.0).value(3.0).endArray()
+ .startArray().value(102.0).value(3.0).endArray()
+ .startArray().value(102.0).value(2.0).endArray()
+ .endArray()
+ .endArray()
+ .startArray()
+ .startArray()
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(101.0).value(0.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .startArray().value(100.0).value(1.0).endArray()
+ .startArray().value(100.0).value(0.0).endArray()
+ .endArray()
+ .startArray()
+ .startArray().value(100.2).value(0.8).endArray()
+ .startArray().value(100.2).value(0.2).endArray()
+ .startArray().value(100.8).value(0.2).endArray()
+ .startArray().value(100.8).value(0.8).endArray()
+ .startArray().value(100.2).value(0.8).endArray()
+ .endArray()
+ .endArray()
+ .endArray()
+ .endObject().string();
+
+ List<Coordinate> shellCoordinates = new ArrayList<Coordinate>();
+ shellCoordinates.add(new Coordinate(100, 0));
+ shellCoordinates.add(new Coordinate(101, 0));
+ shellCoordinates.add(new Coordinate(101, 1));
+ shellCoordinates.add(new Coordinate(100, 1));
+ shellCoordinates.add(new Coordinate(100, 0));
+
+ List<Coordinate> holeCoordinates = new ArrayList<Coordinate>();
+ holeCoordinates.add(new Coordinate(100.2, 0.2));
+ holeCoordinates.add(new Coordinate(100.8, 0.2));
+ holeCoordinates.add(new Coordinate(100.8, 0.8));
+ holeCoordinates.add(new Coordinate(100.2, 0.8));
+ holeCoordinates.add(new Coordinate(100.2, 0.2));
+
+ LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
+ LinearRing[] holes = new LinearRing[1];
+ holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[holeCoordinates.size()]));
+ Polygon withHoles = GEOMETRY_FACTORY.createPolygon(shell, holes);
+
+ shellCoordinates = new ArrayList<Coordinate>();
+ shellCoordinates.add(new Coordinate(102, 3));
+ shellCoordinates.add(new Coordinate(103, 3));
+ shellCoordinates.add(new Coordinate(103, 2));
+ shellCoordinates.add(new Coordinate(102, 2));
+ shellCoordinates.add(new Coordinate(102, 3));
+
+
+ shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
+ Polygon withoutHoles = GEOMETRY_FACTORY.createPolygon(shell, null);
+
+ MultiPolygon expected = GEOMETRY_FACTORY.createMultiPolygon(new Polygon[] {withoutHoles, withHoles});
+
+ assertGeometryEquals(new JtsGeometry(expected, ShapeBuilder.SPATIAL_CONTEXT, false), multiPolygonGeoJson);
+ }
+
+ @Test
+ public void testThatParserExtractsCorrectTypeAndCoordinatesFromArbitraryJson() throws IOException {
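+ // "type" and the root-level "coordinates" must be extracted while foreign members (crs, bbox, bubu) and nested objects are ignored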
+ String pointGeoJson = XContentFactory.jsonBuilder().startObject()
+ .startObject("crs")
+ .field("type", "name")
+ .startObject("properties")
+ .field("name", "urn:ogc:def:crs:OGC:1.3:CRS84")
+ .endObject()
+ .endObject()
+ .field("bbox", "foobar")
+ .field("type", "point")
+ .field("bubu", "foobar")
+ .startArray("coordinates").value(100.0).value(0.0).endArray()
+ .startObject("nested").startArray("coordinates").value(200.0).value(0.0).endArray().endObject()
+ .startObject("lala").field("type", "NotAPoint").endObject()
+ .endObject().string();
+
+ Point expected = GEOMETRY_FACTORY.createPoint(new Coordinate(100.0, 0.0));
+ assertGeometryEquals(new JtsPoint(expected, ShapeBuilder.SPATIAL_CONTEXT), pointGeoJson);
+ }
+
+ private void assertGeometryEquals(Shape expected, String geoJson) throws IOException {
+ XContentParser parser = JsonXContent.jsonXContent.createParser(geoJson);
+ parser.nextToken();
+ ElasticsearchGeoAssertions.assertEquals(ShapeBuilder.parse(parser).build(), expected);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
new file mode 100644
index 0000000..9bb1505
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo;
+
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Rectangle;
+import com.spatial4j.core.shape.Shape;
+import com.vividsolutions.jts.geom.Coordinate;
+import com.vividsolutions.jts.geom.LineString;
+import com.vividsolutions.jts.geom.Polygon;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions.assertMultiLineString;
+import static org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions.assertMultiPolygon;
+/**
+ * Tests for {@link ShapeBuilder}
+ */
+public class ShapeBuilderTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNewPoint() {
+ Point point = ShapeBuilder.newPoint(-100, 45).build();
+ assertEquals(-100D, point.getX(), 0.0d);
+ assertEquals(45D, point.getY(), 0.0d);
+ }
+
+ @Test
+ public void testNewRectangle() {
+ Rectangle rectangle = ShapeBuilder.newEnvelope().topLeft(-45, 30).bottomRight(45, -30).build();
+ assertEquals(-45D, rectangle.getMinX(), 0.0d);
+ assertEquals(-30D, rectangle.getMinY(), 0.0d);
+ assertEquals(45D, rectangle.getMaxX(), 0.0d);
+ assertEquals(30D, rectangle.getMaxY(), 0.0d);
+ }
+
+ @Test
+ public void testNewPolygon() {
+ Polygon polygon = ShapeBuilder.newPolygon()
+ .point(-45, 30)
+ .point(45, 30)
+ .point(45, -30)
+ .point(-45, -30)
+ .point(-45, 30).toPolygon();
+
+ LineString exterior = polygon.getExteriorRing();
+ assertEquals(exterior.getCoordinateN(0), new Coordinate(-45, 30));
+ assertEquals(exterior.getCoordinateN(1), new Coordinate(45, 30));
+ assertEquals(exterior.getCoordinateN(2), new Coordinate(45, -30));
+ assertEquals(exterior.getCoordinateN(3), new Coordinate(-45, -30));
+ }
+
+ @Test
+ public void testLineStringBuilder() {
+ // Building a simple LineString
+ ShapeBuilder.newLineString()
+ .point(-130.0, 55.0)
+ .point(-130.0, -40.0)
+ .point(-15.0, -40.0)
+ .point(-20.0, 50.0)
+ .point(-45.0, 50.0)
+ .point(-45.0, -15.0)
+ .point(-110.0, -15.0)
+ .point(-110.0, 55.0).build();
+
+ // Building a linestring that needs to be wrapped
+ ShapeBuilder.newLineString()
+ .point(100.0, 50.0)
+ .point(110.0, -40.0)
+ .point(240.0, -40.0)
+ .point(230.0, 60.0)
+ .point(200.0, 60.0)
+ .point(200.0, -30.0)
+ .point(130.0, -30.0)
+ .point(130.0, 60.0)
+ .build();
+
+ // Building a lineString on the dateline (-180)
+ ShapeBuilder.newLineString()
+ .point(-180.0, 80.0)
+ .point(-180.0, 40.0)
+ .point(-180.0, -40.0)
+ .point(-180.0, -80.0)
+ .build();
+
+ // Building a lineString on the dateline (+180)
+ ShapeBuilder.newLineString()
+ .point(180.0, 80.0)
+ .point(180.0, 40.0)
+ .point(180.0, -40.0)
+ .point(180.0, -80.0)
+ .build();
+ }
+
+ @Test
+ public void testMultiLineString() {
+ ShapeBuilder.newMultiLinestring()
+ .linestring()
+ .point(-100.0, 50.0)
+ .point(50.0, 50.0)
+ .point(50.0, 20.0)
+ .point(-100.0, 20.0)
+ .end()
+ .linestring()
+ .point(-100.0, 20.0)
+ .point(50.0, 20.0)
+ .point(50.0, 0.0)
+ .point(-100.0, 0.0)
+ .end()
+ .build();
+
+
+ // LineString that needs to be wrapped
+ ShapeBuilder.newMultiLinestring()
+ .linestring()
+ .point(150.0, 60.0)
+ .point(200.0, 60.0)
+ .point(200.0, 40.0)
+ .point(150.0, 40.0)
+ .end()
+ .linestring()
+ .point(150.0, 20.0)
+ .point(200.0, 20.0)
+ .point(200.0, 0.0)
+ .point(150.0, 0.0)
+ .end()
+ .build();
+ }
+
+ @Test
+ public void testPolygonSelfIntersection() {
+ try {
+ ShapeBuilder.newPolygon()
+ .point(-40.0, 50.0)
+ .point(40.0, 50.0)
+ .point(-40.0, -50.0)
+ .point(40.0, -50.0)
+ .close().build();
+ fail("Polygon self-intersection");
+ } catch (Throwable e) {}
+
+ }
+
+ @Test
+ public void testGeoCircle() {
+ ShapeBuilder.newCircleBuilder().center(0, 0).radius("100m").build();
+ ShapeBuilder.newCircleBuilder().center(+180, 0).radius("100m").build();
+ ShapeBuilder.newCircleBuilder().center(-180, 0).radius("100m").build();
+ ShapeBuilder.newCircleBuilder().center(0, 90).radius("100m").build();
+ ShapeBuilder.newCircleBuilder().center(0, -90).radius("100m").build();
+ }
+
+ @Test
+ public void testPolygonWrapping() {
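+ // longitudes beyond -180 force the ring across the dateline, so the build yields a multi-polygon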
+ Shape shape = ShapeBuilder.newPolygon()
+ .point(-150.0, 65.0)
+ .point(-250.0, 65.0)
+ .point(-250.0, -65.0)
+ .point(-150.0, -65.0)
+ .close().build();
+
+ assertMultiPolygon(shape);
+ }
+
+ @Test
+ public void testLineStringWrapping() {
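+ // the same dateline crossing splits a linestring into a multi-linestring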
+ Shape shape = ShapeBuilder.newLineString()
+ .point(-150.0, 65.0)
+ .point(-250.0, 65.0)
+ .point(-250.0, -65.0)
+ .point(-150.0, -65.0)
+ .build();
+
+ assertMultiLineString(shape);
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/common/hppc/HppcMapsTests.java b/src/test/java/org/elasticsearch/common/hppc/HppcMapsTests.java
new file mode 100644
index 0000000..a562131
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/hppc/HppcMapsTests.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.hppc;
+
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import org.elasticsearch.common.collect.HppcMaps;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class HppcMapsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testIntersection() throws Exception {
+ assumeTrue(ASSERTIONS_ENABLED);
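+ // the null handling below relies on assertions inside HppcMaps.intersection, so run only with -ea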
+ ObjectOpenHashSet<String> set1 = ObjectOpenHashSet.from("1", "2", "3");
+ ObjectOpenHashSet<String> set2 = ObjectOpenHashSet.from("1", "2", "3");
+ List<String> values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(3));
+ assertThat(values.contains("1"), equalTo(true));
+ assertThat(values.contains("2"), equalTo(true));
+ assertThat(values.contains("3"), equalTo(true));
+
+ set1 = ObjectOpenHashSet.from("1", "2", "3");
+ set2 = ObjectOpenHashSet.from("3", "4", "5");
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(1));
+ assertThat(values.get(0), equalTo("3"));
+
+ set1 = ObjectOpenHashSet.from("1", "2", "3");
+ set2 = ObjectOpenHashSet.from("4", "5", "6");
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(0));
+
+ set1 = ObjectOpenHashSet.from();
+ set2 = ObjectOpenHashSet.from("3", "4", "5");
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(0));
+
+ set1 = ObjectOpenHashSet.from("1", "2", "3");
+ set2 = ObjectOpenHashSet.from();
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(0));
+
+ set1 = ObjectOpenHashSet.from();
+ set2 = ObjectOpenHashSet.from();
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(0));
+
+ set1 = null;
+ set2 = ObjectOpenHashSet.from();
+ try {
+ toList(HppcMaps.intersection(set1, set2));
+ fail();
+ } catch (AssertionError e) {}
+
+ set1 = ObjectOpenHashSet.from();
+ set2 = null;
+ try {
+ toList(HppcMaps.intersection(set1, set2));
+ fail();
+ } catch (AssertionError e) {}
+
+ set1 = null;
+ set2 = null;
+ try {
+ toList(HppcMaps.intersection(set1, set2));
+ fail();
+ } catch (AssertionError e) {}
+ }
+
+ private List<String> toList(Iterable<String> iterable) {
+ List<String> list = new ArrayList<String>();
+ for (String s : iterable) {
+ list.add(s);
+ }
+ return list;
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/common/io/StreamsTests.java b/src/test/java/org/elasticsearch/common/io/StreamsTests.java
new file mode 100644
index 0000000..cde97c5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/io/StreamsTests.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.*;
+import java.util.Arrays;
+
+import static org.elasticsearch.common.io.Streams.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Unit tests for {@link org.elasticsearch.common.io.Streams}.
+ */
+public class StreamsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCopyFromInputStream() throws IOException {
+ byte[] content = "content".getBytes(Charsets.UTF_8);
+ ByteArrayInputStream in = new ByteArrayInputStream(content);
+ ByteArrayOutputStream out = new ByteArrayOutputStream(content.length);
+ long count = copy(in, out);
+
+ assertThat(count, equalTo((long) content.length));
+ assertThat(Arrays.equals(content, out.toByteArray()), equalTo(true));
+ }
+
+ @Test
+ public void testCopyFromByteArray() throws IOException {
+ byte[] content = "content".getBytes(Charsets.UTF_8);
+ ByteArrayOutputStream out = new ByteArrayOutputStream(content.length);
+ copy(content, out);
+ assertThat(Arrays.equals(content, out.toByteArray()), equalTo(true));
+ }
+
+ @Test
+ public void testCopyToByteArray() throws IOException {
+ byte[] content = "content".getBytes(Charsets.UTF_8);
+ ByteArrayInputStream in = new ByteArrayInputStream(content);
+ byte[] result = copyToByteArray(in);
+ assertThat(Arrays.equals(content, result), equalTo(true));
+ }
+
+ @Test
+ public void testCopyFromReader() throws IOException {
+ String content = "content";
+ StringReader in = new StringReader(content);
+ StringWriter out = new StringWriter();
+ int count = copy(in, out);
+ assertThat(content.length(), equalTo(count));
+ assertThat(out.toString(), equalTo(content));
+ }
+
+ @Test
+ public void testCopyFromString() throws IOException {
+ String content = "content";
+ StringWriter out = new StringWriter();
+ copy(content, out);
+ assertThat(out.toString(), equalTo(content));
+ }
+
+ @Test
+ public void testCopyToString() throws IOException {
+ String content = "content";
+ StringReader in = new StringReader(content);
+ String result = copyToString(in);
+ assertThat(result, equalTo(content));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java b/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java
new file mode 100644
index 0000000..3a34b17
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.streams;
+
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class BytesStreamsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleStreams() throws Exception {
+ assumeTrue(Constants.JRE_IS_64BIT);
+ BytesStreamOutput out = new BytesStreamOutput();
+ out.writeBoolean(false);
+ out.writeByte((byte) 1);
+ out.writeShort((short) -1);
+ out.writeInt(-1);
+ out.writeVInt(2);
+ out.writeLong(-3);
+ out.writeVLong(4);
+ out.writeFloat(1.1f);
+ out.writeDouble(2.2);
+ int[] intArray = {1, 2, 3};
+ out.writeGenericValue(intArray);
+ long[] longArray = {1, 2, 3};
+ out.writeGenericValue(longArray);
+ float[] floatArray = {1.1f, 2.2f, 3.3f};
+ out.writeGenericValue(floatArray);
+ double[] doubleArray = {1.1, 2.2, 3.3};
+ out.writeGenericValue(doubleArray);
+ out.writeString("hello");
+ out.writeString("goodbye");
+ BytesStreamInput in = new BytesStreamInput(out.bytes().toBytes(), false);
+ assertThat(in.readBoolean(), equalTo(false));
+ assertThat(in.readByte(), equalTo((byte) 1));
+ assertThat(in.readShort(), equalTo((short) -1));
+ assertThat(in.readInt(), equalTo(-1));
+ assertThat(in.readVInt(), equalTo(2));
+ assertThat(in.readLong(), equalTo((long) -3));
+ assertThat(in.readVLong(), equalTo((long) 4));
+ assertThat((double) in.readFloat(), closeTo(1.1, 0.0001));
+ assertThat(in.readDouble(), closeTo(2.2, 0.0001));
+ assertThat(in.readGenericValue(), equalTo((Object)intArray));
+ assertThat(in.readGenericValue(), equalTo((Object)longArray));
+ assertThat(in.readGenericValue(), equalTo((Object)floatArray));
+ assertThat(in.readGenericValue(), equalTo((Object)doubleArray));
+ assertThat(in.readString(), equalTo("hello"));
+ assertThat(in.readString(), equalTo("goodbye"));
+ }
+
+ @Test
+ public void testGrowLogic() throws Exception {
+ assumeTrue(Constants.JRE_IS_64BIT);
+ BytesStreamOutput out = new BytesStreamOutput();
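+ // the buffer starts at DEFAULT_SIZE and each grow oversizes beyond the requested
+ // capacity to amortize copies; the sizes asserted below follow that strategy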
+ out.writeBytes(new byte[BytesStreamOutput.DEFAULT_SIZE - 5]);
+ assertThat(out.bufferSize(), equalTo(2048)); // remains the default
+ out.writeBytes(new byte[1 * 1024]);
+ assertThat(out.bufferSize(), equalTo(4608));
+ out.writeBytes(new byte[32 * 1024]);
+ assertThat(out.bufferSize(), equalTo(40320));
+ out.writeBytes(new byte[32 * 1024]);
+ assertThat(out.bufferSize(), equalTo(90720));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/io/streams/HandlesStreamsTests.java b/src/test/java/org/elasticsearch/common/io/streams/HandlesStreamsTests.java
new file mode 100644
index 0000000..a28082f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/io/streams/HandlesStreamsTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.streams;
+
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.HandlesStreamInput;
+import org.elasticsearch.common.io.stream.HandlesStreamOutput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class HandlesStreamsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSharedStringHandles() throws Exception {
+ String test1 = "test1";
+ String test2 = "test2";
+ String test3 = "test3";
+ String test4 = "test4";
+ String test5 = "test5";
+ String test6 = "test6";
+
+ BytesStreamOutput bout = new BytesStreamOutput();
+ HandlesStreamOutput out = new HandlesStreamOutput(bout);
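+ // writeString round-trips equal but distinct instances; writeSharedString deduplicates,
+ // so repeated shared strings are read back as the same instance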
+ out.writeString(test1);
+ out.writeString(test1);
+ out.writeString(test2);
+ out.writeString(test3);
+ out.writeSharedString(test4);
+ out.writeSharedString(test4);
+ out.writeSharedString(test5);
+ out.writeSharedString(test6);
+
+ BytesStreamInput bin = new BytesStreamInput(bout.bytes());
+ HandlesStreamInput in = new HandlesStreamInput(bin);
+ String s1 = in.readString();
+ String s2 = in.readString();
+ String s3 = in.readString();
+ String s4 = in.readString();
+ String s5 = in.readSharedString();
+ String s6 = in.readSharedString();
+ String s7 = in.readSharedString();
+ String s8 = in.readSharedString();
+
+ assertThat(s1, equalTo(test1));
+ assertThat(s2, equalTo(test1));
+ assertThat(s3, equalTo(test2));
+ assertThat(s4, equalTo(test3));
+ assertThat(s5, equalTo(test4));
+ assertThat(s6, equalTo(test4));
+ assertThat(s7, equalTo(test5));
+ assertThat(s8, equalTo(test6));
+
+ assertThat(s1, not(sameInstance(s2)));
+ assertThat(s5, sameInstance(s6));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java b/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java
new file mode 100644
index 0000000..6c0ab25
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.joda;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class DateMathParserTests extends ElasticsearchTestCase {
+
+ @Test
+ public void dateMathTests() {
+ DateMathParser parser = new DateMathParser(Joda.forPattern("dateOptionalTime"), TimeUnit.MILLISECONDS);
+
+ assertThat(parser.parse("now", 0), equalTo(0l));
+ assertThat(parser.parse("now+m", 0), equalTo(TimeUnit.MINUTES.toMillis(1)));
+ assertThat(parser.parse("now+1m", 0), equalTo(TimeUnit.MINUTES.toMillis(1)));
+ assertThat(parser.parse("now+11m", 0), equalTo(TimeUnit.MINUTES.toMillis(11)));
+
+ assertThat(parser.parse("now+1d", 0), equalTo(TimeUnit.DAYS.toMillis(1)));
+
+ assertThat(parser.parse("now+1m+1s", 0), equalTo(TimeUnit.MINUTES.toMillis(1) + TimeUnit.SECONDS.toMillis(1)));
+ assertThat(parser.parse("now+1m-1s", 0), equalTo(TimeUnit.MINUTES.toMillis(1) - TimeUnit.SECONDS.toMillis(1)));
+
+ assertThat(parser.parse("now+1m+1s/m", 0), equalTo(TimeUnit.MINUTES.toMillis(1)));
+ assertThat(parser.parseRoundCeil("now+1m+1s/m", 0), equalTo(TimeUnit.MINUTES.toMillis(2)));
+
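+ // four years from the 1970 epoch include one leap year (1972), hence 4 * 365 + 1 days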
+ assertThat(parser.parse("now+4y", 0), equalTo(TimeUnit.DAYS.toMillis(4*365 + 1)));
+ }
+
+ @Test
+ public void actualDateTests() {
+ DateMathParser parser = new DateMathParser(Joda.forPattern("dateOptionalTime"), TimeUnit.MILLISECONDS);
+
+ assertThat(parser.parse("1970-01-01", 0), equalTo(0l));
+ assertThat(parser.parse("1970-01-01||+1m", 0), equalTo(TimeUnit.MINUTES.toMillis(1)));
+ assertThat(parser.parse("1970-01-01||+1m+1s", 0), equalTo(TimeUnit.MINUTES.toMillis(1) + TimeUnit.SECONDS.toMillis(1)));
+
+ assertThat(parser.parse("2013-01-01||+1y", 0), equalTo(parser.parse("2013-01-01", 0) + TimeUnit.DAYS.toMillis(365)));
+ assertThat(parser.parse("2013-03-03||/y", 0), equalTo(parser.parse("2013-01-01", 0)));
+ assertThat(parser.parseRoundCeil("2013-03-03||/y", 0), equalTo(parser.parse("2014-01-01", 0)));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java
new file mode 100644
index 0000000..0ae9781
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.junit.Test;
+
+/**
+ *
+ */
+public class LuceneTest {
+
+
+ /*
+ * simple test that ensures that we bump the version on upgrade
+ */
+ @Test
+ public void testVersion() {
+ ESLogger logger = ESLoggerFactory.getLogger(LuceneTest.class.getName());
+ Version[] values = Version.values();
+ assertThat(Version.LUCENE_CURRENT, equalTo(values[values.length-1]));
+ assertThat("Latest Lucene Version is not set after upgrade", Lucene.VERSION, equalTo(values[values.length-2]));
+ assertThat(Lucene.parseVersion(null, Lucene.VERSION, null), equalTo(Lucene.VERSION));
+ for (int i = 0; i < values.length-1; i++) {
+ // this should fail if the lucene version is not mapped as a string in Lucene.java
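+ // e.g. LUCENE_45 is rewritten to "4.5" before being parsed back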
+ assertThat(Lucene.parseVersion(values[i].name().replaceFirst("^LUCENE_(\\d)(\\d)$", "$1.$2"), Version.LUCENE_CURRENT, logger), equalTo(values[i]));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java b/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java
new file mode 100644
index 0000000..bb7de12
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java
@@ -0,0 +1,342 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.all;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.payloads.PayloadHelper;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleAllTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testBoostOnEagerTokenizer() throws Exception {
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", "all", 2.0f);
+ allEntries.addText("field2", "your", 1.0f);
+ allEntries.addText("field1", "boosts", 0.5f);
+ allEntries.reset();
+ // the whitespace analyzer's tokenizer reads characters eagerly, unlike the standard tokenizer
+ final TokenStream ts = AllTokenStream.allTokenStream("any", allEntries, new WhitespaceAnalyzer(Lucene.VERSION));
+ final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+ final PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
+ ts.reset();
+ for (int i = 0; i < 3; ++i) {
+ assertTrue(ts.incrementToken());
+ final String term;
+ final float boost;
+ switch (i) {
+ case 0:
+ term = "all";
+ boost = 2;
+ break;
+ case 1:
+ term = "your";
+ boost = 1;
+ break;
+ case 2:
+ term = "boosts";
+ boost = 0.5f;
+ break;
+ default:
+ throw new AssertionError();
+ }
+ assertEquals(term, termAtt.toString());
+ final BytesRef payload = payloadAtt.getPayload();
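+ // a null or empty payload encodes the default boost of 1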
+ if (payload == null || payload.length == 0) {
+ assertEquals(boost, 1f, 0.001f);
+ } else {
+ assertEquals(4, payload.length);
+ final float b = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
+ assertEquals(boost, b, 0.001f);
+ }
+ }
+ assertFalse(ts.incrementToken());
+ }
+
+ @Test
+ public void testAllEntriesRead() throws Exception {
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", "something", 1.0f);
+ allEntries.addText("field2", "else", 1.0f);
+
+ for (int i = 1; i < 30; i++) {
+ allEntries.reset();
+ char[] data = new char[i];
+ String value = slurpToString(allEntries, data);
+ assertThat("failed for " + i, value, equalTo("something else"));
+ }
+ }
+
+ private String slurpToString(AllEntries allEntries, char[] data) throws IOException {
+ StringBuilder sb = new StringBuilder();
+ while (true) {
+ int read = allEntries.read(data, 0, data.length);
+ if (read == -1) {
+ break;
+ }
+ sb.append(data, 0, read);
+ }
+ return sb.toString();
+ }
+
+ private void assertExplanationScore(IndexSearcher searcher, Query query, ScoreDoc scoreDoc) throws IOException {
+ final Explanation expl = searcher.explain(query, scoreDoc.doc);
+ assertEquals(scoreDoc.score, expl.getValue(), 0.00001f);
+ }
+
+ @Test
+ public void testSimpleAllNoBoost() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document doc = new Document();
+ doc.add(new Field("_id", "1", StoredField.TYPE));
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", "something", 1.0f);
+ allEntries.addText("field2", "else", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new Field("_id", "2", StoredField.TYPE));
+ allEntries = new AllEntries();
+ allEntries.addText("field1", "else", 1.0f);
+ allEntries.addText("field2", "something", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ Query query = new AllTermQuery(new Term("_all", "else"));
+ TopDocs docs = searcher.search(query, 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertExplanationScore(searcher, query, docs.scoreDocs[0]);
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+ assertExplanationScore(searcher, query, docs.scoreDocs[1]);
+
+ query = new AllTermQuery(new Term("_all", "something"));
+ docs = searcher.search(query, 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertExplanationScore(searcher, query, docs.scoreDocs[0]);
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+ assertExplanationScore(searcher, query, docs.scoreDocs[1]);
+
+ indexWriter.close();
+ }
+
+ @Test
+ public void testSimpleAllWithBoost() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document doc = new Document();
+ doc.add(new Field("_id", "1", StoredField.TYPE));
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", "something", 1.0f);
+ allEntries.addText("field2", "else", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new Field("_id", "2", StoredField.TYPE));
+ allEntries = new AllEntries();
+ allEntries.addText("field1", "else", 2.0f);
+ allEntries.addText("field2", "something", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ // this one is boosted. so the second doc is more relevant
+ Query query = new AllTermQuery(new Term("_all", "else"));
+ TopDocs docs = searcher.search(query, 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(1));
+ assertExplanationScore(searcher, query, docs.scoreDocs[0]);
+ assertThat(docs.scoreDocs[1].doc, equalTo(0));
+ assertExplanationScore(searcher, query, docs.scoreDocs[1]);
+
+ query = new AllTermQuery(new Term("_all", "something"));
+ docs = searcher.search(query, 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertExplanationScore(searcher, query, docs.scoreDocs[0]);
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+ assertExplanationScore(searcher, query, docs.scoreDocs[1]);
+
+ indexWriter.close();
+ }
+
+ @Test
+ public void testMultipleTokensAllNoBoost() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document doc = new Document();
+ doc.add(new Field("_id", "1", StoredField.TYPE));
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", "something moo", 1.0f);
+ allEntries.addText("field2", "else koo", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new Field("_id", "2", StoredField.TYPE));
+ allEntries = new AllEntries();
+ allEntries.addText("field1", "else koo", 1.0f);
+ allEntries.addText("field2", "something moo", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+ docs = searcher.search(new AllTermQuery(new Term("_all", "koo")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+ docs = searcher.search(new AllTermQuery(new Term("_all", "something")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+ docs = searcher.search(new AllTermQuery(new Term("_all", "moo")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+ indexWriter.close();
+ }
+
+ @Test
+ public void testMultipleTokensAllWithBoost() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document doc = new Document();
+ doc.add(new Field("_id", "1", StoredField.TYPE));
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", "something moo", 1.0f);
+ allEntries.addText("field2", "else koo", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new Field("_id", "2", StoredField.TYPE));
+ allEntries = new AllEntries();
+ allEntries.addText("field1", "else koo", 2.0f);
+ allEntries.addText("field2", "something moo", 1.0f);
+ allEntries.reset();
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(1));
+ assertThat(docs.scoreDocs[1].doc, equalTo(0));
+
+ docs = searcher.search(new AllTermQuery(new Term("_all", "koo")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(1));
+ assertThat(docs.scoreDocs[1].doc, equalTo(0));
+
+ docs = searcher.search(new AllTermQuery(new Term("_all", "something")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+ docs = searcher.search(new AllTermQuery(new Term("_all", "moo")), 10);
+ assertThat(docs.totalHits, equalTo(2));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+ assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+ indexWriter.close();
+ }
+
+ @Test
+ public void testNoTokensWithKeywordAnalyzer() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.KEYWORD_ANALYZER));
+
+ Document doc = new Document();
+ doc.add(new Field("_id", "1", StoredField.TYPE));
+ AllEntries allEntries = new AllEntries();
+ allEntries.reset();
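+ // No text was added, so the _all token stream is empty; the document itself
+ // is still indexed and visible to a match-all query.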
+ doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.KEYWORD_ANALYZER)));
+
+ indexWriter.addDocument(doc);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ TopDocs docs = searcher.search(new MatchAllDocsQuery(), 10);
+ assertThat(docs.totalHits, equalTo(1));
+ assertThat(docs.scoreDocs[0].doc, equalTo(0));
+
+ indexWriter.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/MatchAllDocsFilterTests.java b/src/test/java/org/elasticsearch/common/lucene/search/MatchAllDocsFilterTests.java
new file mode 100644
index 0000000..0b85525
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/search/MatchAllDocsFilterTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class MatchAllDocsFilterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testMatchAllDocsFilter() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new TextField("text", "lucene", Field.Store.YES));
+ indexWriter.addDocument(document);
+
+ document = new Document();
+ document.add(new TextField("_id", "2", Field.Store.YES));
+ document.add(new TextField("text", "lucene release", Field.Store.YES));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
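+ // A constant-score query over the match-all filter should count every document.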
+ XConstantScoreQuery query = new XConstantScoreQuery(Queries.MATCH_ALL_FILTER);
+ long count = Lucene.count(searcher, query);
+ assertThat(count, equalTo(2L));
+
+ reader.close();
+ indexWriter.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/MoreLikeThisQueryTests.java b/src/test/java/org/elasticsearch/common/lucene/search/MoreLikeThisQueryTests.java
new file mode 100644
index 0000000..c2b51f6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/search/MoreLikeThisQueryTests.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class MoreLikeThisQueryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimple() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+ indexWriter.commit();
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new TextField("text", "lucene", Field.Store.YES));
+ indexWriter.addDocument(document);
+
+ document = new Document();
+ document.add(new TextField("_id", "2", Field.Store.YES));
+ document.add(new TextField("text", "lucene release", Field.Store.YES));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
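+ // With minimum term and doc frequencies of 1, the like-text term "lucene" is
+ // selected and matches both documents.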
+ MoreLikeThisQuery mltQuery = new MoreLikeThisQuery("lucene", new String[]{"text"}, Lucene.STANDARD_ANALYZER);
+ mltQuery.setLikeText("lucene");
+ mltQuery.setMinTermFrequency(1);
+ mltQuery.setMinDocFreq(1);
+ long count = Lucene.count(searcher, mltQuery);
+ assertThat(count, equalTo(2L));
+
+ reader.close();
+ indexWriter.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java b/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java
new file mode 100644
index 0000000..cc2fac4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+public class MultiPhrasePrefixQueryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleTests() throws Exception {
+ IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+ Document doc = new Document();
+ doc.add(new Field("field", "aaa bbb ccc ddd", TextField.TYPE_NOT_STORED));
+ writer.addDocument(doc);
+ IndexReader reader = DirectoryReader.open(writer, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
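+ // In a MultiPhrasePrefixQuery the last term added is treated as a prefix;
+ // any preceding terms must match at the preceding phrase positions.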
+ MultiPhrasePrefixQuery query = new MultiPhrasePrefixQuery();
+ query.add(new Term("field", "aa"));
+ assertThat(Lucene.count(searcher, query), equalTo(1L));
+
+ query = new MultiPhrasePrefixQuery();
+ query.add(new Term("field", "aaa"));
+ query.add(new Term("field", "bb"));
+ assertThat(Lucene.count(searcher, query), equalTo(1L));
+
+ query = new MultiPhrasePrefixQuery();
+ query.setSlop(1);
+ query.add(new Term("field", "aaa"));
+ query.add(new Term("field", "cc"));
+ assertThat(Lucene.count(searcher, query), equalTo(1L));
+
+ query = new MultiPhrasePrefixQuery();
+ query.setSlop(1);
+ query.add(new Term("field", "xxx"));
+ assertThat(Lucene.count(searcher, query), equalTo(0L));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/TermsFilterTests.java b/src/test/java/org/elasticsearch/common/lucene/search/TermsFilterTests.java
new file mode 100644
index 0000000..1c84a0f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/search/TermsFilterTests.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+public class TermsFilterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testTermFilter() throws Exception {
+ String fieldName = "field1";
+ Directory rd = new RAMDirectory();
+ IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer()));
+ for (int i = 0; i < 100; i++) {
+ Document doc = new Document();
+ int term = i * 10; // terms are multiples of 10
+ doc.add(new Field(fieldName, "" + term, StringField.TYPE_NOT_STORED));
+ doc.add(new Field("all", "xxx", StringField.TYPE_NOT_STORED));
+ w.addDocument(doc);
+ if ((i % 40) == 0) {
+ w.commit();
+ }
+ }
+ AtomicReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(w, true));
+ w.close();
+
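+ // "19" was never indexed (all terms are multiples of 10), so the filter
+ // yields no DocIdSet at all.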
+ TermFilter tf = new TermFilter(new Term(fieldName, "19"));
+ FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ assertThat(bits, nullValue());
+
+ tf = new TermFilter(new Term(fieldName, "20"));
+ DocIdSet result = tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ bits = DocIdSets.toFixedBitSet(result.iterator(), reader.maxDoc());
+ assertThat(bits.cardinality(), equalTo(1));
+
+ tf = new TermFilter(new Term("all", "xxx"));
+ result = tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ bits = DocIdSets.toFixedBitSet(result.iterator(), reader.maxDoc());
+ assertThat(bits.cardinality(), equalTo(100));
+
+ reader.close();
+ rd.close();
+ }
+
+ @Test
+ public void testTermsFilter() throws Exception {
+ String fieldName = "field1";
+ Directory rd = new RAMDirectory();
+ IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer()));
+ for (int i = 0; i < 100; i++) {
+ Document doc = new Document();
+ int term = i * 10; // terms are multiples of 10
+ doc.add(new Field(fieldName, "" + term, StringField.TYPE_NOT_STORED));
+ doc.add(new Field("all", "xxx", StringField.TYPE_NOT_STORED));
+ w.addDocument(doc);
+ if ((i % 40) == 0) {
+ w.commit();
+ }
+ }
+ AtomicReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(w, true));
+ w.close();
+
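+ // XTermsFilter ORs its terms together; terms that don't exist in the index
+ // simply contribute no matches.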
+ XTermsFilter tf = new XTermsFilter(new Term[]{new Term(fieldName, "19")});
+ FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ assertThat(bits, nullValue());
+
+ tf = new XTermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20")});
+ bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ assertThat(bits.cardinality(), equalTo(1));
+
+ tf = new XTermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10")});
+ bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ assertThat(bits.cardinality(), equalTo(2));
+
+ tf = new XTermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10"), new Term(fieldName, "00")});
+ bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ assertThat(bits.cardinality(), equalTo(2));
+
+ reader.close();
+ rd.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterLuceneTests.java b/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterLuceneTests.java
new file mode 100644
index 0000000..5aa4424
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterLuceneTests.java
@@ -0,0 +1,391 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.FilterClause;
+import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+/**
+ * Tests ported from Lucene.
+ */
+public class XBooleanFilterLuceneTests extends ElasticsearchTestCase {
+
+ private Directory directory;
+ private AtomicReader reader;
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ directory = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.VERSION, new WhitespaceAnalyzer(Lucene.VERSION)));
+
+ // Add a series of docs with filterable fields: access rights, prices, dates and "in-stock" flags
+ addDoc(writer, "admin guest", "010", "20040101", "Y");
+ addDoc(writer, "guest", "020", "20040101", "Y");
+ addDoc(writer, "guest", "020", "20050101", "Y");
+ addDoc(writer, "admin", "020", "20050101", "Maybe");
+ addDoc(writer, "admin guest", "030", "20050101", "N");
+ writer.close();
+ reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(directory));
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ reader.close();
+ directory.close();
+ }
+
+ private void addDoc(IndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException {
+ Document doc = new Document();
+ doc.add(new TextField("accessRights", accessRights, Field.Store.YES));
+ doc.add(new TextField("price", price, Field.Store.YES));
+ doc.add(new TextField("date", date, Field.Store.YES));
+ doc.add(new TextField("inStock", inStock, Field.Store.YES));
+ writer.addDocument(doc);
+ }
+
+ private Filter getRangeFilter(String field, String lowerPrice, String upperPrice) {
+ return TermRangeFilter.newStringRange(field, lowerPrice, upperPrice, true, true);
+ }
+
+ private Filter getTermsFilter(String field, String text) {
+ return new XTermsFilter(new Term(field, text));
+ }
+
+ private Filter getWrappedTermQuery(String field, String text) {
+ return new QueryWrapperFilter(new TermQuery(new Term(field, text)));
+ }
+
+ private Filter getEmptyFilter() {
+ return new Filter() {
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
+ return new FixedBitSet(context.reader().maxDoc());
+ }
+ };
+ }
+
+ private Filter getNullDISFilter() {
+ return new Filter() {
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
+ return null;
+ }
+ };
+ }
+
+ private Filter getNullDISIFilter() {
+ return new Filter() {
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
+ return new DocIdSet() {
+ @Override
+ public DocIdSetIterator iterator() {
+ return null;
+ }
+
+ @Override
+ public boolean isCacheable() {
+ return true;
+ }
+ };
+ }
+ };
+ }
+
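+ // Counts the documents matched by a filter, treating a null DocIdSet or a
+ // null iterator as matching nothing.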
+ private void tstFilterCard(String mes, int expected, Filter filt) throws Exception {
+ int actual = 0;
+ DocIdSet docIdSet = filt.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ if (docIdSet != null) {
+ DocIdSetIterator disi = docIdSet.iterator();
+ if (disi != null) {
+ while (disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ actual++;
+ }
+ }
+ }
+ assertThat(mes, actual, equalTo(expected));
+ }
+
+ @Test
+ public void testShould() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.SHOULD);
+ tstFilterCard("Should retrieves only 1 doc", 1, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getWrappedTermQuery("price", "030"), BooleanClause.Occur.SHOULD);
+ tstFilterCard("Should retrieves only 1 doc", 1, booleanFilter);
+ }
+
+ @Test
+ public void testShoulds() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "010", "020"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getRangeFilter("price", "020", "030"), BooleanClause.Occur.SHOULD);
+ tstFilterCard("Shoulds are Ored together", 5, booleanFilter);
+ }
+
+ @Test
+ public void testShouldsAndMustNot() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "010", "020"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getRangeFilter("price", "020", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("inStock", "N"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("Shoulds Ored but AndNot", 4, booleanFilter);
+
+ booleanFilter.add(getTermsFilter("inStock", "Maybe"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("Shoulds Ored but AndNots", 3, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "010", "020"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getRangeFilter("price", "020", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getWrappedTermQuery("inStock", "N"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("Shoulds Ored but AndNot", 4, booleanFilter);
+
+ booleanFilter.add(getWrappedTermQuery("inStock", "Maybe"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("Shoulds Ored but AndNots", 3, booleanFilter);
+ }
+
+ @Test
+ public void testShouldsAndMust() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "010", "020"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getRangeFilter("price", "020", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard("Shoulds Ored but MUST", 3, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "010", "020"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getRangeFilter("price", "020", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getWrappedTermQuery("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard("Shoulds Ored but MUST", 3, booleanFilter);
+ }
+
+ @Test
+ public void testShouldsAndMusts() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "010", "020"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getRangeFilter("price", "020", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getRangeFilter("date", "20040101", "20041231"), BooleanClause.Occur.MUST);
+ tstFilterCard("Shoulds Ored but MUSTs ANDED", 1, booleanFilter);
+ }
+
+ @Test
+ public void testShouldsAndMustsAndMustNot() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "030", "040"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getRangeFilter("date", "20050101", "20051231"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getTermsFilter("inStock", "N"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("Shoulds Ored but MUSTs ANDED and MustNot", 0, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getRangeFilter("price", "030", "040"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getWrappedTermQuery("accessRights", "admin"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getRangeFilter("date", "20050101", "20051231"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getWrappedTermQuery("inStock", "N"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("Shoulds Ored but MUSTs ANDED and MustNot", 0, booleanFilter);
+ }
+
+ @Test
+ public void testJustMust() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard("MUST", 3, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getWrappedTermQuery("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard("MUST", 3, booleanFilter);
+ }
+
+ @Test
+ public void testJustMustNot() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("inStock", "N"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("MUST_NOT", 4, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getWrappedTermQuery("inStock", "N"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("MUST_NOT", 4, booleanFilter);
+ }
+
+ @Test
+ public void testMustAndMustNot() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("inStock", "N"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("MUST_NOT wins over MUST for same docs", 0, booleanFilter);
+
+ // same with a real DISI (no OpenBitSetIterator)
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getWrappedTermQuery("inStock", "N"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getWrappedTermQuery("price", "030"), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("MUST_NOT wins over MUST for same docs", 0, booleanFilter);
+ }
+
+ @Test
+ public void testEmpty() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ tstFilterCard("empty XBooleanFilter returns no results", 0, booleanFilter);
+ }
+
+ @Test
+ public void testCombinedNullDocIdSets() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.MUST);
+ tstFilterCard("A MUST filter that returns a null DIS should never return documents", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.MUST);
+ tstFilterCard("A MUST filter that returns a null DISI should never return documents", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.SHOULD);
+ tstFilterCard("A SHOULD filter that returns a null DIS should be invisible", 1, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.SHOULD);
+ tstFilterCard("A SHOULD filter that returns a null DISI should be invisible", 1, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("A MUST_NOT filter that returns a null DIS should be invisible", 1, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("price", "030"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("A MUST_NOT filter that returns a null DISI should be invisible", 1, booleanFilter);
+ }
+
+ @Test
+ public void testJustNullDocIdSets() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.MUST);
+ tstFilterCard("A MUST filter that returns a null DIS should never return documents", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.MUST);
+ tstFilterCard("A MUST filter that returns a null DISI should never return documents", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.SHOULD);
+ tstFilterCard("A single SHOULD filter that returns a null DIS should never return documents", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.SHOULD);
+ tstFilterCard("A single SHOULD filter that returns a null DISI should never return documents", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("A single MUST_NOT filter that returns a null DIS should be invisible", 5, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.MUST_NOT);
+ tstFilterCard("A single MUST_NOT filter that returns a null DIS should be invisible", 5, booleanFilter);
+ }
+
+ @Test
+ public void testNonMatchingShouldsAndMusts() throws Exception {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getEmptyFilter(), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard(">0 shoulds with no matches should return no docs", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISFilter(), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard(">0 shoulds with no matches should return no docs", 0, booleanFilter);
+
+ booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getNullDISIFilter(), BooleanClause.Occur.SHOULD);
+ booleanFilter.add(getTermsFilter("accessRights", "admin"), BooleanClause.Occur.MUST);
+ tstFilterCard(">0 shoulds with no matches should return no docs", 0, booleanFilter);
+ }
+
+ @Test
+ public void testToStringOfBooleanFilterContainingTermsFilter() {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ booleanFilter.add(getTermsFilter("inStock", "N"), BooleanClause.Occur.MUST);
+ booleanFilter.add(getTermsFilter("isFragile", "Y"), BooleanClause.Occur.MUST);
+
+ assertThat("BooleanFilter(+inStock:N +isFragile:Y)", equalTo(booleanFilter.toString()));
+ }
+
+ @Test
+ public void testToStringOfWrappedBooleanFilters() {
+ XBooleanFilter orFilter = new XBooleanFilter();
+
+ XBooleanFilter stockFilter = new XBooleanFilter();
+ stockFilter.add(new FilterClause(getTermsFilter("inStock", "Y"), BooleanClause.Occur.MUST));
+ stockFilter.add(new FilterClause(getTermsFilter("barCode", "12345678"), BooleanClause.Occur.MUST));
+
+ orFilter.add(new FilterClause(stockFilter, BooleanClause.Occur.SHOULD));
+
+ XBooleanFilter productPropertyFilter = new XBooleanFilter();
+ productPropertyFilter.add(new FilterClause(getTermsFilter("isHeavy", "N"), BooleanClause.Occur.MUST));
+ productPropertyFilter.add(new FilterClause(getTermsFilter("isDamaged", "Y"), BooleanClause.Occur.MUST));
+
+ orFilter.add(new FilterClause(productPropertyFilter, BooleanClause.Occur.SHOULD));
+
+ XBooleanFilter composedFilter = new XBooleanFilter();
+ composedFilter.add(new FilterClause(orFilter, BooleanClause.Occur.MUST));
+
+ assertThat(
+ composedFilter.toString(),
+ equalTo("BooleanFilter(+BooleanFilter(BooleanFilter(+inStock:Y +barCode:12345678) BooleanFilter(+isHeavy:N +isDamaged:Y)))")
+ );
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterTests.java b/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterTests.java
new file mode 100644
index 0000000..32c08af
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterTests.java
@@ -0,0 +1,567 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.FilterClause;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.apache.lucene.search.BooleanClause.Occur.*;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+/**
+ */
+public class XBooleanFilterTests extends ElasticsearchLuceneTestCase {
+
+ private Directory directory;
+ private AtomicReader reader;
+ private static final char[] distinctValues = new char[] {'a', 'b', 'c', 'd', 'v', 'z', 'y'};
+
+ @Before
+ public void setup() throws Exception {
+ super.setUp();
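+ // Each row of the matrix below is one document; column i is indexed as
+ // string field "i". Docs 0 and 1 share the a/b/c/d prefix, doc 2 holds
+ // only 'a's plus an 'x'.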
+ char[][] documentMatrix = new char[][] {
+ {'a', 'b', 'c', 'd', 'v'},
+ {'a', 'b', 'c', 'd', 'z'},
+ {'a', 'a', 'a', 'a', 'x'}
+ };
+
+ List<Document> documents = new ArrayList<Document>(documentMatrix.length);
+ for (char[] fields : documentMatrix) {
+ Document document = new Document();
+ for (int i = 0; i < fields.length; i++) {
+ document.add(new StringField(Integer.toString(i), String.valueOf(fields[i]), Field.Store.NO));
+ }
+ documents.add(document);
+ }
+ directory = newDirectory();
+ IndexWriter w = new IndexWriter(directory, new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer()));
+ w.addDocuments(documents);
+ w.close();
+ reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(directory));
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ reader.close();
+ directory.close();
+ super.tearDown();
+ }
+
+ @Test
+ public void testWithTwoClausesOfEachOccur_allFixedBitsetFilters() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(0, 'a', MUST, false), newFilterClause(1, 'b', MUST, false),
+ newFilterClause(2, 'c', SHOULD, false), newFilterClause(3, 'd', SHOULD, false),
+ newFilterClause(4, 'e', MUST_NOT, false), newFilterClause(5, 'f', MUST_NOT, false)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(4, 'e', MUST_NOT, false), newFilterClause(5, 'f', MUST_NOT, false),
+ newFilterClause(0, 'a', MUST, false), newFilterClause(1, 'b', MUST, false),
+ newFilterClause(2, 'c', SHOULD, false), newFilterClause(3, 'd', SHOULD, false)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(2, 'c', SHOULD, false), newFilterClause(3, 'd', SHOULD, false),
+ newFilterClause(4, 'e', MUST_NOT, false), newFilterClause(5, 'f', MUST_NOT, false),
+ newFilterClause(0, 'a', MUST, false), newFilterClause(1, 'b', MUST, false)
+ ));
+
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testWithTwoClausesOfEachOccur_allBitsBasedFilters() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(0, 'a', MUST, true), newFilterClause(1, 'b', MUST, true),
+ newFilterClause(2, 'c', SHOULD, true), newFilterClause(3, 'd', SHOULD, true),
+ newFilterClause(4, 'e', MUST_NOT, true), newFilterClause(5, 'f', MUST_NOT, true)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(4, 'e', MUST_NOT, true), newFilterClause(5, 'f', MUST_NOT, true),
+ newFilterClause(0, 'a', MUST, true), newFilterClause(1, 'b', MUST, true),
+ newFilterClause(2, 'c', SHOULD, true), newFilterClause(3, 'd', SHOULD, true)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(2, 'c', SHOULD, true), newFilterClause(3, 'd', SHOULD, true),
+ newFilterClause(4, 'e', MUST_NOT, true), newFilterClause(5, 'f', MUST_NOT, true),
+ newFilterClause(0, 'a', MUST, true), newFilterClause(1, 'b', MUST, true)
+ ));
+
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testWithTwoClausesOfEachOccur_allFilterTypes() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(0, 'a', MUST, true), newFilterClause(1, 'b', MUST, false),
+ newFilterClause(2, 'c', SHOULD, true), newFilterClause(3, 'd', SHOULD, false),
+ newFilterClause(4, 'e', MUST_NOT, true), newFilterClause(5, 'f', MUST_NOT, false)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(4, 'e', MUST_NOT, true), newFilterClause(5, 'f', MUST_NOT, false),
+ newFilterClause(0, 'a', MUST, true), newFilterClause(1, 'b', MUST, false),
+ newFilterClause(2, 'c', SHOULD, true), newFilterClause(3, 'd', SHOULD, false)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(2, 'c', SHOULD, true), newFilterClause(3, 'd', SHOULD, false),
+ newFilterClause(4, 'e', MUST_NOT, true), newFilterClause(5, 'f', MUST_NOT, false),
+ newFilterClause(0, 'a', MUST, true), newFilterClause(1, 'b', MUST, false)
+ ));
+
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+
+ booleanFilters.clear();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(0, 'a', MUST, false), newFilterClause(1, 'b', MUST, true),
+ newFilterClause(2, 'c', SHOULD, false), newFilterClause(3, 'd', SHOULD, true),
+ newFilterClause(4, 'e', MUST_NOT, false), newFilterClause(5, 'f', MUST_NOT, true)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(4, 'e', MUST_NOT, false), newFilterClause(5, 'f', MUST_NOT, true),
+ newFilterClause(0, 'a', MUST, false), newFilterClause(1, 'b', MUST, true),
+ newFilterClause(2, 'c', SHOULD, false), newFilterClause(3, 'd', SHOULD, true)
+ ));
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(2, 'c', SHOULD, false), newFilterClause(3, 'd', SHOULD, true),
+ newFilterClause(4, 'e', MUST_NOT, false), newFilterClause(5, 'f', MUST_NOT, true),
+ newFilterClause(0, 'a', MUST, false), newFilterClause(1, 'b', MUST, true)
+ ));
+
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testWithTwoClausesOfEachOccur_singleClauseOptimisation() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'b', MUST, true)
+ ));
+
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+
+ booleanFilters.clear();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'c', MUST_NOT, true)
+ ));
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(3));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(true));
+ }
+
+ booleanFilters.clear();
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(2, 'c', SHOULD, true)
+ ));
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testOnlyShouldClauses() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ // 2 slow filters
+ // This case caused: https://github.com/elasticsearch/elasticsearch/issues/2826
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'a', SHOULD, true),
+ newFilterClause(1, 'b', SHOULD, true)
+ ));
+ // 2 fast filters
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'a', SHOULD, false),
+ newFilterClause(1, 'b', SHOULD, false)
+ ));
+ // 1 fast filters, 1 slow filter
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'a', SHOULD, true),
+ newFilterClause(1, 'b', SHOULD, false)
+ ));
+
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(3));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testOnlyMustClauses() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ // Slow filters
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(3, 'd', MUST, true),
+ newFilterClause(3, 'd', MUST, true)
+ ));
+ // 2 fast filters
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(3, 'd', MUST, false),
+ newFilterClause(3, 'd', MUST, false)
+ ));
+ // 1 fast filters, 1 slow filter
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(3, 'd', MUST, true),
+ newFilterClause(3, 'd', MUST, false)
+ ));
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testOnlyMustNotClauses() throws Exception {
+ List<XBooleanFilter> booleanFilters = new ArrayList<XBooleanFilter>();
+ // Slow filters
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'a', MUST_NOT, true),
+ newFilterClause(1, 'a', MUST_NOT, true)
+ ));
+ // 2 fast filters
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'a', MUST_NOT, false),
+ newFilterClause(1, 'a', MUST_NOT, false)
+ ));
+ // 1 fast filters, 1 slow filter
+ booleanFilters.add(createBooleanFilter(
+ newFilterClause(1, 'a', MUST_NOT, true),
+ newFilterClause(1, 'a', MUST_NOT, false)
+ ));
+ for (XBooleanFilter booleanFilter : booleanFilters) {
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(true));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testNonMatchingSlowShouldWithMatchingMust() throws Exception {
+ XBooleanFilter booleanFilter = createBooleanFilter(
+ newFilterClause(0, 'a', MUST, false),
+ newFilterClause(0, 'b', SHOULD, true)
+ );
+
+ DocIdSet docIdSet = booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ assertThat(docIdSet, equalTo(null));
+ }
+
+ @Test
+ public void testSlowShouldClause_atLeastOneShouldMustMatch() throws Exception {
+ XBooleanFilter booleanFilter = createBooleanFilter(
+ newFilterClause(0, 'a', MUST, false),
+ newFilterClause(1, 'a', SHOULD, true)
+ );
+
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(1));
+ assertThat(result.get(0), equalTo(false));
+ assertThat(result.get(1), equalTo(false));
+ assertThat(result.get(2), equalTo(true));
+
+ booleanFilter = createBooleanFilter(
+ newFilterClause(0, 'a', MUST, false),
+ newFilterClause(1, 'a', SHOULD, true),
+ newFilterClause(4, 'z', SHOULD, true)
+ );
+
+ result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(false));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(true));
+ }
+
+ @Test
+ // See issue: https://github.com/elasticsearch/elasticsearch/issues/4130
+ public void testOneFastMustNotOneFastShouldAndOneSlowShould() throws Exception {
+ XBooleanFilter booleanFilter = createBooleanFilter(
+ newFilterClause(4, 'v', MUST_NOT, false),
+ newFilterClause(4, 'z', SHOULD, false),
+ newFilterClause(4, 'x', SHOULD, true)
+ );
+
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(false));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(true));
+ }
+
+ @Test
+ public void testOneFastShouldClauseAndOneSlowShouldClause() throws Exception {
+ XBooleanFilter booleanFilter = createBooleanFilter(
+ newFilterClause(4, 'z', SHOULD, false),
+ newFilterClause(4, 'x', SHOULD, true)
+ );
+
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(false));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(true));
+ }
+
+ @Test
+ public void testOneMustClauseOneFastShouldClauseAndOneSlowShouldClause() throws Exception {
+ XBooleanFilter booleanFilter = createBooleanFilter(
+ newFilterClause(0, 'a', MUST, false),
+ newFilterClause(4, 'z', SHOULD, false),
+ newFilterClause(4, 'x', SHOULD, true)
+ );
+
+ FixedBitSet result = new FixedBitSet(reader.maxDoc());
+ result.or(booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(2));
+ assertThat(result.get(0), equalTo(false));
+ assertThat(result.get(1), equalTo(true));
+ assertThat(result.get(2), equalTo(true));
+ }
+
+ private static FilterClause newFilterClause(int field, char character, BooleanClause.Occur occur, boolean slowerBitsBackedFilter) {
+ Filter filter;
+ if (slowerBitsBackedFilter) {
+ filter = new PrettyPrintFieldCacheTermsFilter(String.valueOf(field), String.valueOf(character));
+ } else {
+ Term term = new Term(String.valueOf(field), String.valueOf(character));
+ filter = new TermFilter(term);
+ }
+ return new FilterClause(filter, occur);
+ }
+
+ private static XBooleanFilter createBooleanFilter(FilterClause... clauses) {
+ XBooleanFilter booleanFilter = new XBooleanFilter();
+ for (FilterClause clause : clauses) {
+ booleanFilter.add(clause);
+ }
+ return booleanFilter;
+ }
+
+ @Test
+ public void testRandom() throws IOException {
+ int iterations = atLeast(400); // don't worry, this is fast!
+ for (int iter = 0; iter < iterations; iter++) {
+ int numClauses = 1 + random().nextInt(10);
+ FilterClause[] clauses = new FilterClause[numClauses];
+ BooleanQuery topLevel = new BooleanQuery();
+ BooleanQuery orQuery = new BooleanQuery();
+ boolean hasMust = false;
+ boolean hasShould = false;
+ boolean hasMustNot = false;
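+ // Build a random XBooleanFilter and an equivalent BooleanQuery clause by
+ // clause, then verify below that both select exactly the same documents.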
+ for (int i = 0; i < numClauses; i++) {
+ int field = random().nextInt(5);
+ char value = distinctValues[random().nextInt(distinctValues.length)];
+ switch (random().nextInt(10)) {
+ case 9:
+ case 8:
+ case 7:
+ case 6:
+ case 5:
+ hasMust = true;
+ if (rarely()) {
+ clauses[i] = new FilterClause(new EmptyFilter(), MUST);
+ topLevel.add(new BooleanClause(new MatchNoDocsQuery(), MUST));
+ } else {
+ clauses[i] = newFilterClause(field, value, MUST, random().nextBoolean());
+ topLevel.add(new BooleanClause(new TermQuery(new Term(String.valueOf(field), String.valueOf(value))), MUST));
+ }
+ break;
+ case 4:
+ case 3:
+ case 2:
+ case 1:
+ hasShould = true;
+ if (rarely()) {
+ clauses[i] = new FilterClause(new EmptyFilter(), SHOULD);
+ orQuery.add(new BooleanClause(new MatchNoDocsQuery(), SHOULD));
+ } else {
+ clauses[i] = newFilterClause(field, value, SHOULD, random().nextBoolean());
+ orQuery.add(new BooleanClause(new TermQuery(new Term(String.valueOf(field), String.valueOf(value))), SHOULD));
+ }
+ break;
+ case 0:
+ hasMustNot = true;
+ if (rarely()) {
+ clauses[i] = new FilterClause(new EmptyFilter(), MUST_NOT);
+ topLevel.add(new BooleanClause(new MatchNoDocsQuery(), MUST_NOT));
+ } else {
+ clauses[i] = newFilterClause(field, value, MUST_NOT, random().nextBoolean());
+ topLevel.add(new BooleanClause(new TermQuery(new Term(String.valueOf(field), String.valueOf(value))), MUST_NOT));
+ }
+ break;
+
+ }
+ }
+ if (orQuery.getClauses().length > 0) {
+ topLevel.add(new BooleanClause(orQuery, MUST));
+ }
+ if (hasMustNot && !hasMust && !hasShould) { // pure negative
+ topLevel.add(new BooleanClause(new MatchAllDocsQuery(), MUST));
+ }
+ XBooleanFilter booleanFilter = createBooleanFilter(clauses);
+
+ FixedBitSet leftResult = new FixedBitSet(reader.maxDoc());
+ FixedBitSet rightResult = new FixedBitSet(reader.maxDoc());
+ DocIdSet left = booleanFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ DocIdSet right = new QueryWrapperFilter(topLevel).getDocIdSet(reader.getContext(), reader.getLiveDocs());
+ if (left == null || right == null) {
+ if (left == null && right != null) {
+ assertThat(errorMsg(clauses, topLevel), (right.iterator() == null ? DocIdSetIterator.NO_MORE_DOCS : right.iterator().nextDoc()), equalTo(DocIdSetIterator.NO_MORE_DOCS));
+ }
+ if (left != null && right == null) {
+ assertThat(errorMsg(clauses, topLevel), (left.iterator() == null ? DocIdSetIterator.NO_MORE_DOCS : left.iterator().nextDoc()), equalTo(DocIdSetIterator.NO_MORE_DOCS));
+ }
+ } else {
+ DocIdSetIterator leftIter = left.iterator();
+ DocIdSetIterator rightIter = right.iterator();
+ if (leftIter != null) {
+ leftResult.or(leftIter);
+ }
+
+ if (rightIter != null) {
+ rightResult.or(rightIter);
+ }
+
+ assertThat(leftResult.cardinality(), equalTo(rightResult.cardinality()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(errorMsg(clauses, topLevel) + " -- failed at index " + i, leftResult.get(i), equalTo(rightResult.get(i)));
+ }
+ }
+ }
+ }
+
+ private String errorMsg(FilterClause[] clauses, BooleanQuery query) {
+ return query.toString() + " vs. " + Arrays.toString(clauses);
+ }
+
+
+ public static final class PrettyPrintFieldCacheTermsFilter extends FieldCacheTermsFilter {
+
+ private final String value;
+ private final String field;
+
+ public PrettyPrintFieldCacheTermsFilter(String field, String value) {
+ super(field, value);
+ this.field = field;
+ this.value = value;
+ }
+
+ @Override
+ public String toString() {
+ return "SLOW(" + field + ":" + value + ")";
+ }
+ }
+
+ public final class EmptyFilter extends Filter {
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ return random().nextBoolean() ? new Empty() : null;
+ }
+
+ private class Empty extends DocIdSet {
+
+ @Override
+ public DocIdSetIterator iterator() throws IOException {
+ return null;
+ }
+ }
+ }
+
+}
+
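Editor's note: the random test above is a differential check: every XBooleanFilter is mirrored by a semantically equivalent BooleanQuery, and the two resulting doc-id sets must agree bit for bit. A minimal sketch of that pattern for a single MUST_NOT clause, reusing the test class's `reader` field and static imports (null DocIdSets, which the full test treats as empty, are ignored here):

    XBooleanFilter filter = new XBooleanFilter();
    filter.add(new FilterClause(new TermFilter(new Term("0", "a")), MUST_NOT));

    // A pure-negative BooleanQuery matches nothing on its own, so the
    // query-side mirror needs an explicit MatchAllDocsQuery MUST clause.
    BooleanQuery mirror = new BooleanQuery();
    mirror.add(new BooleanClause(new TermQuery(new Term("0", "a")), MUST_NOT));
    mirror.add(new BooleanClause(new MatchAllDocsQuery(), MUST));

    FixedBitSet left = new FixedBitSet(reader.maxDoc());
    FixedBitSet right = new FixedBitSet(reader.maxDoc());
    left.or(filter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
    right.or(new QueryWrapperFilter(mirror).getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
    assertThat(left.cardinality(), equalTo(right.cardinality()));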
diff --git a/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java b/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java
new file mode 100644
index 0000000..315b93e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.store;
+
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThan;
+
+/**
+ *
+ */
+public class InputStreamIndexInputTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSingleReadSingleByteLimit() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ for (int i = 0; i < 3; i++) {
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ assertThat(is.actualSizeToRead(), equalTo(1l));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(-1));
+ }
+
+ for (int i = 0; i < 3; i++) {
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ assertThat(is.actualSizeToRead(), equalTo(1l));
+ assertThat(is.read(), equalTo(2));
+ assertThat(is.read(), equalTo(-1));
+ }
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(), equalTo(-1));
+ }
+
+ @Test
+ public void testReadMultiSingleByteLimit1() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ byte[] read = new byte[2];
+
+ for (int i = 0; i < 3; i++) {
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(is.actualSizeToRead(), equalTo(1l));
+ assertThat(is.read(read), equalTo(1));
+ assertThat(read[0], equalTo((byte) 1));
+ }
+
+ for (int i = 0; i < 3; i++) {
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(is.actualSizeToRead(), equalTo(1l));
+ assertThat(is.read(read), equalTo(1));
+ assertThat(read[0], equalTo((byte) 2));
+ }
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(read), equalTo(-1));
+ }
+
+ @Test
+ public void testSingleReadTwoBytesLimit() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(-1));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(2));
+ assertThat(is.read(), equalTo(-1));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(), equalTo(2));
+ assertThat(is.read(), equalTo(2));
+ assertThat(is.read(), equalTo(-1));
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(), equalTo(-1));
+ }
+
+ @Test
+ public void testReadMultiTwoBytesLimit1() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ byte[] read = new byte[2];
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(read), equalTo(2));
+ assertThat(read[0], equalTo((byte) 1));
+ assertThat(read[1], equalTo((byte) 1));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(read), equalTo(2));
+ assertThat(read[0], equalTo((byte) 1));
+ assertThat(read[1], equalTo((byte) 2));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(read), equalTo(2));
+ assertThat(read[0], equalTo((byte) 2));
+ assertThat(read[1], equalTo((byte) 2));
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(read), equalTo(-1));
+ }
+
+ @Test
+ public void testReadMultiFourBytesLimit() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ byte[] read = new byte[4];
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 4);
+ assertThat(is.actualSizeToRead(), equalTo(4l));
+ assertThat(is.read(read), equalTo(4));
+ assertThat(read[0], equalTo((byte) 1));
+ assertThat(read[1], equalTo((byte) 1));
+ assertThat(read[2], equalTo((byte) 1));
+ assertThat(read[3], equalTo((byte) 2));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 4);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(read), equalTo(2));
+ assertThat(read[0], equalTo((byte) 2));
+ assertThat(read[1], equalTo((byte) 2));
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ is = new InputStreamIndexInput(input, 4);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(read), equalTo(-1));
+ }
+
+ @Test
+ public void testMarkRest() throws Exception {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 4);
+ assertThat(is.markSupported(), equalTo(true));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(1));
+ is.mark(0);
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(2));
+ is.reset();
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(2));
+ }
+}
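Editor's note: the tests above pin down the wrapper's contract: each InputStreamIndexInput reads at most `limit` bytes from the underlying IndexInput, actualSizeToRead() shrinks near end of file, read() returns -1 once the chunk is exhausted, and the shared file pointer advances across instances. A minimal usage sketch under the same Lucene 4.x API, where `dir` is a directory as in the tests and `consume` is a hypothetical per-byte consumer:

    IndexInput input = dir.openInput("test", IOContext.DEFAULT);
    // stream the file out in fixed-size chunks
    while (input.getFilePointer() < input.length()) {
        InputStreamIndexInput chunk = new InputStreamIndexInput(input, 2);
        int b;
        while ((b = chunk.read()) != -1) {
            consume(b); // hypothetical consumer
        }
    }
    input.close();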
diff --git a/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java
new file mode 100644
index 0000000..8556058
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java
@@ -0,0 +1,285 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene.uid;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.document.*;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.*;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
+import org.elasticsearch.index.merge.Merges;
+import org.elasticsearch.index.merge.policy.IndexUpgraderMergePolicy;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+public class VersionsTests extends ElasticsearchLuceneTestCase {
+
+ public static DirectoryReader reopen(DirectoryReader reader) throws IOException {
+ return reopen(reader, true);
+ }
+
+ public static DirectoryReader reopen(DirectoryReader reader, boolean newReaderExpected) throws IOException {
+ DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
+ if (newReader != null) {
+ reader.close();
+ } else {
+ assertFalse(newReaderExpected);
+ }
+ return newReader;
+ }
+
+ @Test
+ public void testVersions() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+ DirectoryReader directoryReader = DirectoryReader.open(writer, true);
+ MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
+
+ Document doc = new Document();
+ doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
+ writer.addDocument(doc);
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_SET));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(Versions.NOT_SET));
+
+ doc = new Document();
+ doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
+ doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 1));
+ writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(1l));
+
+ doc = new Document();
+ Field uid = new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE);
+ Field version = new NumericDocValuesField(VersionFieldMapper.NAME, 2);
+ doc.add(uid);
+ doc.add(version);
+ writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(2l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(2l));
+
+ // test reuse of uid field
+ doc = new Document();
+ version.setLongValue(3);
+ doc.add(uid);
+ doc.add(version);
+ writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
+
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(3l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(3l));
+
+ writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue());
+ directoryReader.close();
+ writer.close();
+ dir.close();
+ }
+
+ @Test
+ public void testNestedDocuments() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ List<Document> docs = new ArrayList<Document>();
+ for (int i = 0; i < 4; ++i) {
+ // Nested
+ Document doc = new Document();
+ doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
+ docs.add(doc);
+ }
+ // Root
+ Document doc = new Document();
+ doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
+ NumericDocValuesField version = new NumericDocValuesField(VersionFieldMapper.NAME, 5L);
+ doc.add(version);
+ docs.add(doc);
+
+ writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
+ DirectoryReader directoryReader = DirectoryReader.open(writer, true);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(5l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(5l));
+
+ version.setLongValue(6L);
+ writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
+ version.setLongValue(7L);
+ writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(7l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(7l));
+
+ writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue());
+ directoryReader.close();
+ writer.close();
+ dir.close();
+ }
+
+ @Test
+ public void testBackwardCompatibility() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ DirectoryReader directoryReader = DirectoryReader.open(writer, true);
+ MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
+
+ Document doc = new Document();
+ UidField uidAndVersion = new UidField("1", 1L);
+ doc.add(uidAndVersion);
+ writer.addDocument(doc);
+
+ uidAndVersion.uid = "2";
+ uidAndVersion.version = 2;
+ writer.addDocument(doc);
+ writer.commit();
+
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1l));
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "2")), equalTo(2l));
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "3")), equalTo(Versions.NOT_FOUND));
+ directoryReader.close();
+ writer.close();
+ dir.close();
+ }
+
+ // This is how versions used to be encoded
+ private static class UidField extends Field {
+ private static final FieldType FIELD_TYPE = new FieldType();
+ static {
+ FIELD_TYPE.setTokenized(true);
+ FIELD_TYPE.setIndexed(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+ FIELD_TYPE.setStored(true);
+ FIELD_TYPE.freeze();
+ }
+ String uid;
+ long version;
+ UidField(String uid, long version) {
+ super(UidFieldMapper.NAME, uid, FIELD_TYPE);
+ this.uid = uid;
+ this.version = version;
+ }
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer) throws IOException {
+ return new TokenStream() {
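+ // starts "finished" so no token is emitted until reset() is called,
+ // per the TokenStream consumption contract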
+ boolean finished = true;
+ final CharTermAttribute term = addAttribute(CharTermAttribute.class);
+ final PayloadAttribute payload = addAttribute(PayloadAttribute.class);
+ @Override
+ public boolean incrementToken() throws IOException {
+ if (finished) {
+ return false;
+ }
+ term.setEmpty().append(uid);
+ payload.setPayload(new BytesRef(Numbers.longToBytes(version)));
+ finished = true;
+ return true;
+ }
+ @Override
+ public void reset() throws IOException {
+ finished = false;
+ }
+ };
+ }
+ }
+
+ @Test
+ public void testMergingOldIndices() throws Exception {
+ final IndexWriterConfig iwConf = new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer());
+ iwConf.setMergePolicy(new IndexUpgraderMergePolicy(iwConf.getMergePolicy()));
+ final Directory dir = newDirectory();
+ final IndexWriter iw = new IndexWriter(dir, iwConf);
+
+ // 1st segment, no _version
+ Document document = new Document();
+ // Add a dummy field (enough to trigger #3237)
+ document.add(new StringField("a", "b", Store.NO));
+ StringField uid = new StringField(UidFieldMapper.NAME, "1", Store.YES);
+ document.add(uid);
+ iw.addDocument(document);
+ uid.setStringValue("2");
+ iw.addDocument(document);
+ iw.commit();
+
+ // 2nd segment, old layout
+ document = new Document();
+ UidField uidAndVersion = new UidField("3", 3L);
+ document.add(uidAndVersion);
+ iw.addDocument(document);
+ uidAndVersion.uid = "4";
+ uidAndVersion.version = 4L;
+ iw.addDocument(document);
+ iw.commit();
+
+ // 3rd segment, new layout
+ document = new Document();
+ uid.setStringValue("5");
+ Field version = new NumericDocValuesField(VersionFieldMapper.NAME, 5L);
+ document.add(uid);
+ document.add(version);
+ iw.addDocument(document);
+ uid.setStringValue("6");
+ version.setLongValue(6L);
+ iw.addDocument(document);
+ iw.commit();
+
+ final Map<String, Long> expectedVersions = ImmutableMap.<String, Long>builder()
+ .put("1", 0L).put("2", 0L).put("3", 0L).put("4", 4L).put("5", 5L).put("6", 6L).build();
+
+ // Force merge and check versions
+ Merges.forceMerge(iw, 1);
+ final AtomicReader ir = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(iw.getDirectory()));
+ final NumericDocValues versions = ir.getNumericDocValues(VersionFieldMapper.NAME);
+ assertThat(versions, notNullValue());
+ for (int i = 0; i < ir.maxDoc(); ++i) {
+ final String uidValue = ir.document(i).get(UidFieldMapper.NAME);
+ final long expectedVersion = expectedVersions.get(uidValue);
+ assertThat(versions.get(i), equalTo(expectedVersion));
+ }
+
+ iw.close();
+ assertThat(IndexWriter.isLocked(iw.getDirectory()), is(false));
+ ir.close();
+ dir.close();
+ }
+}
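Editor's note: the lookups exercised above follow one pattern: a document's version lives in a NumericDocValues field addressed through its _uid term, with two sentinel values: Versions.NOT_FOUND when no live document carries the uid, and Versions.NOT_SET when the document predates version metadata. A minimal caller-side sketch using only the API shown in the tests:

    long version = Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1"));
    if (version == Versions.NOT_FOUND) {
        // no live document has this uid
    } else if (version == Versions.NOT_SET) {
        // the document exists but was indexed without version metadata
    } else {
        // version holds the current document version
    }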
diff --git a/src/test/java/org/elasticsearch/common/path/PathTrieTests.java b/src/test/java/org/elasticsearch/common/path/PathTrieTests.java
new file mode 100644
index 0000000..6b60a45
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/path/PathTrieTests.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.path;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class PathTrieTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testPath() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("/a/b/c", "walla");
+ trie.insert("a/d/g", "kuku");
+ trie.insert("x/b/c", "lala");
+ trie.insert("a/x/*", "one");
+ trie.insert("a/b/*", "two");
+ trie.insert("*/*/x", "three");
+ trie.insert("{index}/insert/{docId}", "bingo");
+
+ assertThat(trie.retrieve("a/b/c"), equalTo("walla"));
+ assertThat(trie.retrieve("a/d/g"), equalTo("kuku"));
+ assertThat(trie.retrieve("x/b/c"), equalTo("lala"));
+ assertThat(trie.retrieve("a/x/b"), equalTo("one"));
+ assertThat(trie.retrieve("a/b/d"), equalTo("two"));
+
+ assertThat(trie.retrieve("a/b"), nullValue());
+ assertThat(trie.retrieve("a/b/c/d"), nullValue());
+ assertThat(trie.retrieve("g/t/x"), equalTo("three"));
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("index1/insert/12", params), equalTo("bingo"));
+ assertThat(params.size(), equalTo(2));
+ assertThat(params.get("index"), equalTo("index1"));
+ assertThat(params.get("docId"), equalTo("12"));
+ }
+
+ @Test
+ public void testEmptyPath() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("/", "walla");
+ assertThat(trie.retrieve(""), equalTo("walla"));
+ }
+
+ @Test
+ public void testDifferentNamesOnDifferentPath() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("/a/{type}", "test1");
+ trie.insert("/b/{name}", "test2");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/a/test", params), equalTo("test1"));
+ assertThat(params.get("type"), equalTo("test"));
+
+ params.clear();
+ assertThat(trie.retrieve("/b/testX", params), equalTo("test2"));
+ assertThat(params.get("name"), equalTo("testX"));
+ }
+
+ @Test
+ public void testSameNameOnDifferentPath() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("/a/c/{name}", "test1");
+ trie.insert("/b/{name}", "test2");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/a/c/test", params), equalTo("test1"));
+ assertThat(params.get("name"), equalTo("test"));
+
+ params.clear();
+ assertThat(trie.retrieve("/b/testX", params), equalTo("test2"));
+ assertThat(params.get("name"), equalTo("testX"));
+ }
+
+ @Test
+ public void testPreferNonWildcardExecution() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("{test}", "test1");
+ trie.insert("b", "test2");
+ trie.insert("{test}/a", "test3");
+ trie.insert("b/a", "test4");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/b", params), equalTo("test2"));
+ assertThat(trie.retrieve("/b/a", params), equalTo("test4"));
+ }
+
+ @Test
+ public void testSamePathConcreteResolution() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("{x}/{y}/{z}", "test1");
+ trie.insert("{x}/_y/{k}", "test2");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/a/b/c", params), equalTo("test1"));
+ assertThat(params.get("x"), equalTo("a"));
+ assertThat(params.get("y"), equalTo("b"));
+ assertThat(params.get("z"), equalTo("c"));
+ params.clear();
+ assertThat(trie.retrieve("/a/_y/c", params), equalTo("test2"));
+ assertThat(params.get("x"), equalTo("a"));
+ assertThat(params.get("k"), equalTo("c"));
+ }
+
+ @Test
+ public void testNamedWildcardAndLookupWithWildcard() {
+ PathTrie<String> trie = new PathTrie<String>();
+ trie.insert("x/{test}", "test1");
+ trie.insert("{test}/a", "test2");
+ trie.insert("/{test}", "test3");
+ trie.insert("/{test}/_endpoint", "test4");
+ trie.insert("/*/{test}/_endpoint", "test5");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/x/*", params), equalTo("test1"));
+ assertThat(params.get("test"), equalTo("*"));
+
+ params = newHashMap();
+ assertThat(trie.retrieve("/b/a", params), equalTo("test2"));
+ assertThat(params.get("test"), equalTo("b"));
+
+ params = newHashMap();
+ assertThat(trie.retrieve("/*", params), equalTo("test3"));
+ assertThat(params.get("test"), equalTo("*"));
+
+ params = newHashMap();
+ assertThat(trie.retrieve("/*/_endpoint", params), equalTo("test4"));
+ assertThat(params.get("test"), equalTo("*"));
+
+ params = newHashMap();
+ assertThat(trie.retrieve("a/*/_endpoint", params), equalTo("test5"));
+ assertThat(params.get("test"), equalTo("*"));
+ }
+}
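Editor's note: taken together, the assertions above describe the router contract: literal segments beat named wildcards at the same depth, and every {name} segment captures its matched value into the params map. A minimal REST-style routing sketch (the handler names are hypothetical):

    PathTrie<String> trie = new PathTrie<String>();
    trie.insert("/{index}/_search", "search-by-index");
    trie.insert("/_all/_search", "search-all");

    Map<String, String> params = newHashMap();
    // the literal route wins over the {index} wildcard
    assertThat(trie.retrieve("/_all/_search", params), equalTo("search-all"));
    // the wildcard route captures the index name
    assertThat(trie.retrieve("/twitter/_search", params), equalTo("search-by-index"));
    assertThat(params.get("index"), equalTo("twitter"));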
diff --git a/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java
new file mode 100644
index 0000000..74c491b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Arrays;
+
+public abstract class AbstractRecyclerTests extends ElasticsearchTestCase {
+
+ protected static final Recycler.C<byte[]> RECYCLER_C = new Recycler.C<byte[]>() {
+
+ @Override
+ public byte[] newInstance(int sizing) {
+ return new byte[10];
+ }
+
+ @Override
+ public void clear(byte[] value) {
+ Arrays.fill(value, (byte) 0);
+ }
+
+ };
+
+ protected abstract Recycler<byte[]> newRecycler();
+
+ public void testReuse() {
+ Recycler<byte[]> r = newRecycler();
+ Recycler.V<byte[]> o = r.obtain();
+ assertFalse(o.isRecycled());
+ final byte[] b1 = o.v();
+ o.release();
+ o = r.obtain();
+ final byte[] b2 = o.v();
+ if (o.isRecycled()) {
+ assertSame(b1, b2);
+ } else {
+ assertNotSame(b1, b2);
+ }
+ o.release();
+ r.close();
+ }
+
+ public void testClear() {
+ Recycler<byte[]> r = newRecycler();
+ Recycler.V<byte[]> o = r.obtain();
+ getRandom().nextBytes(o.v());
+ o.release();
+ o = r.obtain();
+ for (int i = 0; i < o.v().length; ++i) {
+ assertEquals(0, o.v()[i]);
+ }
+ o.release();
+ r.close();
+ }
+
+ public void testDoubleRelease() {
+ final Recycler<byte[]> r = newRecycler();
+ final Recycler.V<byte[]> v1 = r.obtain();
+ v1.release();
+ try {
+ v1.release();
+ } catch (ElasticsearchIllegalStateException e) {
+ // impl has protection against double release: ok
+ return;
+ }
+ // otherwise ensure that the double release did not pool the same value twice
+ final Recycler.V<byte[]> v2 = r.obtain();
+ final Recycler.V<byte[]> v3 = r.obtain();
+ assertNotSame(v2.v(), v3.v());
+ r.close();
+ }
+
+}
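Editor's note: the contract these abstract tests enforce on every implementation below: obtain() hands out a Recycler.V whose value has been cleared (via the C.clear callback) if it was recycled, and a value may be released back to the pool at most once, whether or not the implementation detects double release. A minimal usage sketch; any factory from the subclasses below would do in place of Recyclers.none:

    Recycler<byte[]> recycler = Recyclers.none(RECYCLER_C);
    Recycler.V<byte[]> v = recycler.obtain();
    try {
        byte[] buffer = v.v(); // 10-byte scratch buffer, zeroed if recycled
        // ... use buffer ...
    } finally {
        v.release(); // return to the pool, exactly once
    }
    recycler.close();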
diff --git a/src/test/java/org/elasticsearch/common/recycler/ConcurrentRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/ConcurrentRecyclerTests.java
new file mode 100644
index 0000000..758041d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/ConcurrentRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class ConcurrentRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.concurrent(Recyclers.dequeFactory(RECYCLER_C, randomIntBetween(5, 10)), randomIntBetween(1,5));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/LockedRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/LockedRecyclerTests.java
new file mode 100644
index 0000000..9ffdf7a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/LockedRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class LockedRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.locked(Recyclers.deque(RECYCLER_C, randomIntBetween(5, 10)));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/NoneRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/NoneRecyclerTests.java
new file mode 100644
index 0000000..a60c0ba
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/NoneRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class NoneRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.none(RECYCLER_C);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/QueueRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/QueueRecyclerTests.java
new file mode 100644
index 0000000..f693c30
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/QueueRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class QueueRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.concurrentDeque(RECYCLER_C, randomIntBetween(5, 10));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/SoftConcurrentRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/SoftConcurrentRecyclerTests.java
new file mode 100644
index 0000000..0320ff5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/SoftConcurrentRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class SoftConcurrentRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.concurrent(Recyclers.softFactory(Recyclers.dequeFactory(RECYCLER_C, randomIntBetween(5, 10))), randomIntBetween(1, 5));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/SoftThreadLocalRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/SoftThreadLocalRecyclerTests.java
new file mode 100644
index 0000000..2a5d253
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/SoftThreadLocalRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class SoftThreadLocalRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.threadLocal(Recyclers.softFactory(Recyclers.dequeFactory(RECYCLER_C, 10)));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/recycler/ThreadLocalRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/ThreadLocalRecyclerTests.java
new file mode 100644
index 0000000..5ab6892
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/recycler/ThreadLocalRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class ThreadLocalRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler() {
+ return Recyclers.threadLocal(Recyclers.dequeFactory(RECYCLER_C, 10));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/regex/RegexTests.java b/src/test/java/org/elasticsearch/common/regex/RegexTests.java
new file mode 100644
index 0000000..380bf90
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/regex/RegexTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.regex;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Random;
+import java.util.regex.Pattern;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class RegexTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testFlags() {
+ String[] supportedFlags = new String[]{"CASE_INSENSITIVE", "MULTILINE", "DOTALL", "UNICODE_CASE", "CANON_EQ", "UNIX_LINES",
+ "LITERAL", "COMMENTS", "UNICODE_CHAR_CLASS"};
+ int[] flags = new int[]{Pattern.CASE_INSENSITIVE, Pattern.MULTILINE, Pattern.DOTALL, Pattern.UNICODE_CASE, Pattern.CANON_EQ,
+ Pattern.UNIX_LINES, Pattern.LITERAL, Pattern.COMMENTS, Regex.UNICODE_CHARACTER_CLASS};
+ Random random = getRandom();
+ int num = 10 + random.nextInt(100);
+ for (int i = 0; i < num; i++) {
+ int numFlags = random.nextInt(flags.length + 1);
+ int current = 0;
+ StringBuilder builder = new StringBuilder();
+ for (int j = 0; j < numFlags; j++) {
+ int index = random.nextInt(flags.length);
+ current |= flags[index];
+ builder.append(supportedFlags[index]);
+ if (j < numFlags - 1) {
+ builder.append("|");
+ }
+ }
+ String flagsToString = Regex.flagsToString(current);
+ assertThat(Regex.flagsFromString(builder.toString()), equalTo(current));
+ assertThat(Regex.flagsFromString(builder.toString()), equalTo(Regex.flagsFromString(flagsToString)));
+ Pattern.compile("\\w\\d{1,2}", current); // accepts the flags?
+ }
+ }
+
+ @Test(timeout = 1000)
+ public void testDoubleWildcardMatch() {
+ assertTrue(Regex.simpleMatch("ddd", "ddd"));
+ assertTrue(Regex.simpleMatch("d*d*d", "dadd"));
+ assertTrue(Regex.simpleMatch("**ddd", "dddd"));
+ assertFalse(Regex.simpleMatch("**ddd", "fff"));
+ assertTrue(Regex.simpleMatch("fff*ddd", "fffabcddd"));
+ assertTrue(Regex.simpleMatch("fff**ddd", "fffabcddd"));
+ assertFalse(Regex.simpleMatch("fff**ddd", "fffabcdd"));
+ assertTrue(Regex.simpleMatch("fff*******ddd", "fffabcddd"));
+ assertFalse(Regex.simpleMatch("fff******ddd", "fffabcdd"));
+ }
+
+}
\ No newline at end of file
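Editor's note: what the round-trip above verifies: Regex.flagsFromString parses a pipe-delimited list of java.util.regex.Pattern flag names into the corresponding bitmask, and Regex.flagsToString is its inverse. A small sketch:

    int flags = Regex.flagsFromString("CASE_INSENSITIVE|DOTALL");
    assertThat(flags, equalTo(Pattern.CASE_INSENSITIVE | Pattern.DOTALL));
    // the mask is directly usable with Pattern.compile
    Pattern p = Pattern.compile("foo.bar", flags);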
diff --git a/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java b/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java
new file mode 100644
index 0000000..cd77bfc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.rounding;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+
+
+public class RoundingTests extends ElasticsearchTestCase {
+
+ public void testInterval() {
+ final long interval = randomIntBetween(1, 100);
+ Rounding.Interval rounding = new Rounding.Interval(interval);
+ for (int i = 0; i < 1000; ++i) {
+ long l = Math.max(randomLong(), Long.MIN_VALUE + interval);
+ final long r = rounding.round(l);
+ String message = "round(" + l + ", interval=" + interval + ") = " + r;
+ assertEquals(message, 0, r % interval);
+ assertThat(message, r, lessThanOrEqualTo(l));
+ assertThat(message, r + interval, greaterThan(l));
+ }
+ }
+
+}
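Editor's note: the three assertions above pin Rounding.Interval to floor semantics: round(l) is the greatest multiple of the interval not exceeding l, which for negative longs requires a sign correction on top of Java's truncating remainder. A sketch of that arithmetic (the standard floor trick, not necessarily the upstream implementation):

    static long roundToInterval(long value, long interval) {
        long rounded = value - (value % interval);
        if (value < 0 && rounded != value) {
            rounded -= interval; // Java's % truncates toward zero; step down for negatives
        }
        return rounded;
    }
    // roundToInterval(7, 3) == 6;  roundToInterval(-7, 3) == -9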
diff --git a/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java b/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java
new file mode 100644
index 0000000..f57f4e2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.rounding;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class TimeZoneRoundingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testUTCMonthRounding() {
+ TimeZoneRounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).build();
+ assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(utc("2009-02-01T00:00:00.000Z")));
+ assertThat(tzRounding.nextRoundingValue(utc("2009-02-01T00:00:00.000Z")), equalTo(utc("2009-03-01T00:00:00.000Z")));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build();
+ assertThat(tzRounding.round(utc("2012-01-10T01:01:01")), equalTo(utc("2012-01-09T00:00:00.000Z")));
+ assertThat(tzRounding.nextRoundingValue(utc("2012-01-09T00:00:00.000Z")), equalTo(utc("2012-01-16T00:00:00.000Z")));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).postOffset(-TimeValue.timeValueHours(24).millis()).build();
+ assertThat(tzRounding.round(utc("2012-01-10T01:01:01")), equalTo(utc("2012-01-08T00:00:00.000Z")));
+ assertThat(tzRounding.nextRoundingValue(utc("2012-01-08T00:00:00.000Z")), equalTo(utc("2012-01-15T00:00:00.000Z")));
+ }
+
+ @Test
+ public void testDayTimeZoneRounding() {
+ TimeZoneRounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).preZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(0), equalTo(0l - TimeValue.timeValueHours(24).millis()));
+ assertThat(tzRounding.nextRoundingValue(0l - TimeValue.timeValueHours(24).millis()), equalTo(0l));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).preZone(DateTimeZone.forOffsetHours(-2)).postZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(0), equalTo(0l - TimeValue.timeValueHours(26).millis()));
+ assertThat(tzRounding.nextRoundingValue(0l - TimeValue.timeValueHours(26).millis()), equalTo(-TimeValue.timeValueHours(2).millis()));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).preZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(utc("2009-02-02T00:00:00")));
+ assertThat(tzRounding.nextRoundingValue(utc("2009-02-02T00:00:00")), equalTo(utc("2009-02-03T00:00:00")));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).preZone(DateTimeZone.forOffsetHours(-2)).postZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(time("2009-02-02T00:00:00", DateTimeZone.forOffsetHours(+2))));
+ assertThat(tzRounding.nextRoundingValue(time("2009-02-02T00:00:00", DateTimeZone.forOffsetHours(+2))), equalTo(time("2009-02-03T00:00:00", DateTimeZone.forOffsetHours(+2))));
+ }
+
+ @Test
+ public void testTimeTimeZoneRounding() {
+ TimeZoneRounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).preZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(0), equalTo(0l));
+ assertThat(tzRounding.nextRoundingValue(0l), equalTo(TimeValue.timeValueHours(1l).getMillis()));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).preZone(DateTimeZone.forOffsetHours(-2)).postZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(0), equalTo(0l - TimeValue.timeValueHours(2).millis()));
+ assertThat(tzRounding.nextRoundingValue(0l - TimeValue.timeValueHours(2).millis()), equalTo(0l - TimeValue.timeValueHours(1).millis()));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).preZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(utc("2009-02-03T01:00:00")));
+ assertThat(tzRounding.nextRoundingValue(utc("2009-02-03T01:00:00")), equalTo(utc("2009-02-03T02:00:00")));
+
+ tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).preZone(DateTimeZone.forOffsetHours(-2)).postZone(DateTimeZone.forOffsetHours(-2)).build();
+ assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(time("2009-02-03T01:00:00", DateTimeZone.forOffsetHours(+2))));
+ assertThat(tzRounding.nextRoundingValue(time("2009-02-03T01:00:00", DateTimeZone.forOffsetHours(+2))), equalTo(time("2009-02-03T02:00:00", DateTimeZone.forOffsetHours(+2))));
+ }
+
+ private long utc(String time) {
+ return time(time, DateTimeZone.UTC);
+ }
+
+ private long time(String time, DateTimeZone zone) {
+ return ISODateTimeFormat.dateOptionalTimeParser().withZone(zone).parseMillis(time);
+ }
+}
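Editor's note: as the assertions above suggest, preZone moves the timestamp onto a local clock before truncating to the unit, the rounded value stays on that clock, and postZone/postOffset shift the already-rounded result further. A minimal sketch matching the first day-rounding assertion:

    TimeZoneRounding rounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH)
            .preZone(DateTimeZone.forOffsetHours(-2))
            .build();
    // epoch 0 is 1969-12-31T22:00 on the UTC-2 clock; truncating to the day
    // yields that clock's midnight, i.e. epoch minus 24 hours
    assertThat(rounding.round(0), equalTo(-TimeValue.timeValueHours(24).millis()));
    assertThat(rounding.nextRoundingValue(-TimeValue.timeValueHours(24).millis()), equalTo(0l));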
diff --git a/src/test/java/org/elasticsearch/common/settings/ImmutableSettingsTests.java b/src/test/java/org/elasticsearch/common/settings/ImmutableSettingsTests.java
new file mode 100644
index 0000000..845f7e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/ImmutableSettingsTests.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings;
+
+import org.elasticsearch.common.settings.bar.BarTestClass;
+import org.elasticsearch.common.settings.foo.FooTestClass;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class ImmutableSettingsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testGetAsClass() {
+ Settings settings = settingsBuilder()
+ .put("test.class", "bar")
+ .put("test.class.package", "org.elasticsearch.common.settings.bar")
+ .build();
+
+ // Assert that defaultClazz is loaded if setting is not specified
+ assertThat(settings.getAsClass("no.settings", FooTestClass.class, "org.elasticsearch.common.settings.", "TestClass").getName(),
+ equalTo(FooTestClass.class.getName()));
+
+ // Assert that the correct class is loaded if the setting contains a name without a package
+ assertThat(settings.getAsClass("test.class", FooTestClass.class, "org.elasticsearch.common.settings.", "TestClass").getName(),
+ equalTo(BarTestClass.class.getName()));
+
+ // Assert that class cannot be loaded if wrong packagePrefix is specified
+ try {
+ settings.getAsClass("test.class", FooTestClass.class, "com.example.elasticsearch.test.unit..common.settings.", "TestClass");
+ fail("Class with wrong package name shouldn't be loaded");
+ } catch (NoClassSettingsException ex) {
+ // Ignore
+ }
+
+ // Assert that the package name from the settings is applied correctly
+ assertThat(settings.getAsClass("test.class.package", FooTestClass.class, "com.example.elasticsearch.test.unit.common.settings.", "TestClass").getName(),
+ equalTo(BarTestClass.class.getName()));
+
+ }
+
+ @Test
+ public void testLoadFromDelimitedString() {
+ Settings settings = settingsBuilder()
+ .loadFromDelimitedString("key1=value1;key2=value2", ';')
+ .build();
+ assertThat(settings.get("key1"), equalTo("value1"));
+ assertThat(settings.get("key2"), equalTo("value2"));
+ assertThat(settings.getAsMap().size(), equalTo(2));
+ assertThat(settings.toDelimitedString(';'), equalTo("key1=value1;key2=value2;"));
+
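+ // a trailing delimiter is accepted on input, and toDelimitedString always emits one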
+ settings = settingsBuilder()
+ .loadFromDelimitedString("key1=value1;key2=value2;", ';')
+ .build();
+ assertThat(settings.get("key1"), equalTo("value1"));
+ assertThat(settings.get("key2"), equalTo("value2"));
+ assertThat(settings.getAsMap().size(), equalTo(2));
+ assertThat(settings.toDelimitedString(';'), equalTo("key1=value1;key2=value2;"));
+ }
+
+ @Test(expected = NoClassSettingsException.class)
+ public void testThatAllClassNotFoundExceptionsAreCaught() {
+ // the value would have to be "nGram" to actually resolve; the point is that the failed lookup surfaces as NoClassSettingsException, never as a NoClassDefFoundError
+ Settings settings = settingsBuilder().put("type", "ngram").build();
+ settings.getAsClass("type", null, "org.elasticsearch.index.analysis.", "TokenFilterFactory");
+ }
+
+ @Test
+ public void testReplacePropertiesPlaceholderSystemProperty() {
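+ // ${prop} resolves against system properties, ${prop:default} falls back to the default, and ${prop:} leaves the setting unset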
+ System.setProperty("sysProp1", "sysVal1");
+ try {
+ Settings settings = settingsBuilder()
+ .put("setting1", "${sysProp1}")
+ .replacePropertyPlaceholders()
+ .build();
+ assertThat(settings.get("setting1"), equalTo("sysVal1"));
+ } finally {
+ System.clearProperty("sysProp1");
+ }
+
+ Settings settings = settingsBuilder()
+ .put("setting1", "${sysProp1:defaultVal1}")
+ .replacePropertyPlaceholders()
+ .build();
+ assertThat(settings.get("setting1"), equalTo("defaultVal1"));
+
+ settings = settingsBuilder()
+ .put("setting1", "${sysProp1:}")
+ .replacePropertyPlaceholders()
+ .build();
+ assertThat(settings.get("setting1"), is(nullValue()));
+ }
+
+ @Test
+ public void testReplacePropertiesPlaceholderIgnoreEnvUnset() {
+ Settings settings = settingsBuilder()
+ .put("setting1", "${env.UNSET_ENV_VAR}")
+ .replacePropertyPlaceholders()
+ .build();
+ assertThat(settings.get("setting1"), is(nullValue()));
+ }
+
+ @Test
+ public void testUnFlattenedSettings() {
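+ // getAsStructuredMap() rebuilds nested maps from the dotted keys and turns array settings into lists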
+ Settings settings = settingsBuilder()
+ .put("foo", "abc")
+ .put("bar", "def")
+ .put("baz.foo", "ghi")
+ .put("baz.bar", "jkl")
+ .putArray("baz.arr", "a", "b", "c")
+ .build();
+ Map<String, Object> map = settings.getAsStructuredMap();
+ assertThat(map.keySet(), Matchers.<String>hasSize(3));
+ assertThat(map, allOf(
+ Matchers.<String, Object>hasEntry("foo", "abc"),
+ Matchers.<String, Object>hasEntry("bar", "def")));
+
+ @SuppressWarnings("unchecked") Map<String, Object> bazMap = (Map<String, Object>) map.get("baz");
+ assertThat(bazMap.keySet(), Matchers.<String>hasSize(3));
+ assertThat(bazMap, allOf(
+ Matchers.<String, Object>hasEntry("foo", "ghi"),
+ Matchers.<String, Object>hasEntry("bar", "jkl")));
+ @SuppressWarnings("unchecked") List<String> bazArr = (List<String>) bazMap.get("arr");
+ assertThat(bazArr, contains("a", "b", "c"));
+
+ }
+
+ @Test
+ public void testFallbackToFlattenedSettings() {
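+ // when "foo" is both a value and a prefix of other keys, the structured map keeps the flat dotted keys, regardless of insertion order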
+ Settings settings = settingsBuilder()
+ .put("foo", "abc")
+ .put("foo.bar", "def")
+ .put("foo.baz", "ghi").build();
+ Map<String, Object> map = settings.getAsStructuredMap();
+ assertThat(map.keySet(), Matchers.<String>hasSize(3));
+ assertThat(map, allOf(
+ Matchers.<String, Object>hasEntry("foo", "abc"),
+ Matchers.<String, Object>hasEntry("foo.bar", "def"),
+ Matchers.<String, Object>hasEntry("foo.baz", "ghi")));
+
+ settings = settingsBuilder()
+ .put("foo.bar", "def")
+ .put("foo", "abc")
+ .put("foo.baz", "ghi")
+ .build();
+ map = settings.getAsStructuredMap();
+ assertThat(map.keySet(), Matchers.<String>hasSize(3));
+ assertThat(map, allOf(
+ Matchers.<String, Object>hasEntry("foo", "abc"),
+ Matchers.<String, Object>hasEntry("foo.bar", "def"),
+ Matchers.<String, Object>hasEntry("foo.baz", "ghi")));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java b/src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java
new file mode 100644
index 0000000..d4d5d14
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.bar;
+
+// used in ImmutableSettingsTests
+public class BarTestClass {
+}
diff --git a/src/test/java/org/elasticsearch/common/settings/foo/FooTestClass.java b/src/test/java/org/elasticsearch/common/settings/foo/FooTestClass.java
new file mode 100644
index 0000000..36f1527
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/foo/FooTestClass.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.foo;
+
+// used in ImmutableSettingsTests
+public class FooTestClass {
+}
diff --git a/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java b/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java
new file mode 100644
index 0000000..c237b96
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.loader;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class JsonSettingsLoaderTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleJsonSettings() throws Exception {
+ Settings settings = settingsBuilder()
+ .loadFromClasspath("org/elasticsearch/common/settings/loader/test-settings.json")
+ .build();
+
+ assertThat(settings.get("test1.value1"), equalTo("value1"));
+ assertThat(settings.get("test1.test2.value2"), equalTo("value2"));
+ assertThat(settings.getAsInt("test1.test2.value3", -1), equalTo(2));
+
+ // check array: values are flattened into indexed keys and also exposed via getAsArray
+ assertThat(settings.get("test1.test3.0"), equalTo("test3-1"));
+ assertThat(settings.get("test1.test3.1"), equalTo("test3-2"));
+ assertThat(settings.getAsArray("test1.test3").length, equalTo(2));
+ assertThat(settings.getAsArray("test1.test3")[0], equalTo("test3-1"));
+ assertThat(settings.getAsArray("test1.test3")[1], equalTo("test3-2"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java b/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java
new file mode 100644
index 0000000..d541d15
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.loader;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class YamlSettingsLoaderTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleYamlSettings() throws Exception {
+ Settings settings = settingsBuilder()
+ .loadFromClasspath("org/elasticsearch/common/settings/loader/test-settings.yml")
+ .build();
+
+ assertThat(settings.get("test1.value1"), equalTo("value1"));
+ assertThat(settings.get("test1.test2.value2"), equalTo("value2"));
+ assertThat(settings.getAsInt("test1.test2.value3", -1), equalTo(2));
+
+ // check array: values are flattened into indexed keys and also exposed via getAsArray
+ assertThat(settings.get("test1.test3.0"), equalTo("test3-1"));
+ assertThat(settings.get("test1.test3.1"), equalTo("test3-2"));
+ assertThat(settings.getAsArray("test1.test3").length, equalTo(2));
+ assertThat(settings.getAsArray("test1.test3")[0], equalTo("test3-1"));
+ assertThat(settings.getAsArray("test1.test3")[1], equalTo("test3-2"));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/common/settings/loader/test-settings.json b/src/test/java/org/elasticsearch/common/settings/loader/test-settings.json
new file mode 100644
index 0000000..7190648
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/loader/test-settings.json
@@ -0,0 +1,10 @@
+{
+ test1:{
+ value1:"value1",
+ test2:{
+ value2:"value2",
+ value3:2
+ },
+ test3:["test3-1", "test3-2"]
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/common/settings/loader/test-settings.yml b/src/test/java/org/elasticsearch/common/settings/loader/test-settings.yml
new file mode 100644
index 0000000..b533ae0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/settings/loader/test-settings.yml
@@ -0,0 +1,8 @@
+test1:
+ value1: value1
+ test2:
+ value2: value2
+ value3: 2
+ test3:
+ - test3-1
+ - test3-2
diff --git a/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java b/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java
new file mode 100644
index 0000000..8b39e4e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.unit.ByteSizeUnit.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class ByteSizeUnitTests extends ElasticsearchTestCase {
+
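+ // all conversions are binary: 1kb = 1024 bytes, 1mb = 1024kb, and so on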
+ @Test
+ public void testBytes() {
+ assertThat(BYTES.toBytes(1), equalTo(1l));
+ assertThat(BYTES.toKB(1024), equalTo(1l));
+ assertThat(BYTES.toMB(1024 * 1024), equalTo(1l));
+ assertThat(BYTES.toGB(1024 * 1024 * 1024), equalTo(1l));
+ }
+
+ @Test
+ public void testKB() {
+ assertThat(KB.toBytes(1), equalTo(1024l));
+ assertThat(KB.toKB(1), equalTo(1l));
+ assertThat(KB.toMB(1024), equalTo(1l));
+ assertThat(KB.toGB(1024 * 1024), equalTo(1l));
+ }
+
+ @Test
+ public void testMB() {
+ assertThat(MB.toBytes(1), equalTo(1024l * 1024));
+ assertThat(MB.toKB(1), equalTo(1024l));
+ assertThat(MB.toMB(1), equalTo(1l));
+ assertThat(MB.toGB(1024), equalTo(1l));
+ }
+
+ @Test
+ public void testGB() {
+ assertThat(GB.toBytes(1), equalTo(1024l * 1024 * 1024));
+ assertThat(GB.toKB(1), equalTo(1024l * 1024));
+ assertThat(GB.toMB(1), equalTo(1024l));
+ assertThat(GB.toGB(1), equalTo(1l));
+ }
+
+ @Test
+ public void testTB() {
+ assertThat(TB.toBytes(1), equalTo(1024l * 1024 * 1024 * 1024));
+ assertThat(TB.toKB(1), equalTo(1024l * 1024 * 1024));
+ assertThat(TB.toMB(1), equalTo(1024l * 1024));
+ assertThat(TB.toGB(1), equalTo(1024l));
+ assertThat(TB.toTB(1), equalTo(1l));
+ }
+
+ @Test
+ public void testPB() {
+ assertThat(PB.toBytes(1), equalTo(1024l * 1024 * 1024 * 1024 * 1024));
+ assertThat(PB.toKB(1), equalTo(1024l * 1024 * 1024 * 1024));
+ assertThat(PB.toMB(1), equalTo(1024l * 1024 * 1024));
+ assertThat(PB.toGB(1), equalTo(1024l * 1024));
+ assertThat(PB.toTB(1), equalTo(1024l));
+ assertThat(PB.toPB(1), equalTo(1l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java b/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java
new file mode 100644
index 0000000..0522f87
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+public class ByteSizeValueTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testActualPeta() {
+ MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.PB).bytes(), equalTo(4503599627370496l));
+ }
+
+ @Test
+ public void testActualTera() {
+ MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.TB).bytes(), equalTo(4398046511104l));
+ }
+
+ @Test
+ public void testActual() {
+ MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.GB).bytes(), equalTo(4294967296l));
+ }
+
+ @Test
+ public void testSimple() {
+ assertThat(ByteSizeUnit.BYTES.toBytes(10), is(new ByteSizeValue(10, ByteSizeUnit.BYTES).bytes()));
+ assertThat(ByteSizeUnit.KB.toKB(10), is(new ByteSizeValue(10, ByteSizeUnit.KB).kb()));
+ assertThat(ByteSizeUnit.MB.toMB(10), is(new ByteSizeValue(10, ByteSizeUnit.MB).mb()));
+ assertThat(ByteSizeUnit.GB.toGB(10), is(new ByteSizeValue(10, ByteSizeUnit.GB).gb()));
+ assertThat(ByteSizeUnit.TB.toTB(10), is(new ByteSizeValue(10, ByteSizeUnit.TB).tb()));
+ assertThat(ByteSizeUnit.PB.toPB(10), is(new ByteSizeValue(10, ByteSizeUnit.PB).pb()));
+ }
+
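+ // toString() scales to the largest unit that keeps the value at or above one; there is nothing above pb, hence "1536pb"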
+ @Test
+ public void testToString() {
+ assertThat("10b", is(new ByteSizeValue(10, ByteSizeUnit.BYTES).toString()));
+ assertThat("1.5kb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.BYTES).toString()));
+ assertThat("1.5mb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.KB).toString()));
+ assertThat("1.5gb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.MB).toString()));
+ assertThat("1.5tb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.GB).toString()));
+ assertThat("1.5pb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.TB).toString()));
+ assertThat("1536pb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.PB).toString()));
+ }
+
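+ // unit suffixes parse case-insensitively, and a bare number defaults to bytes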
+ @Test
+ public void testParsing() {
+ assertThat(ByteSizeValue.parseBytesSizeValue("42pb").toString(), is("42pb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("42P").toString(), is("42pb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("42PB").toString(), is("42pb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("54tb").toString(), is("54tb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("54T").toString(), is("54tb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("54TB").toString(), is("54tb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("12gb").toString(), is("12gb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("12G").toString(), is("12gb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("12GB").toString(), is("12gb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("12M").toString(), is("12mb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("1b").toString(), is("1b"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("23kb").toString(), is("23kb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("23k").toString(), is("23kb"));
+ assertThat(ByteSizeValue.parseBytesSizeValue("23").toString(), is("23b"));
+ }
+
+ @Test(expected = ElasticsearchParseException.class)
+ public void testFailOnEmptyParsing() {
+ assertThat(ByteSizeValue.parseBytesSizeValue("").toString(), is("23kb"));
+ }
+
+ @Test(expected = ElasticsearchParseException.class)
+ public void testFailOnEmptyNumberParsing() {
+ assertThat(ByteSizeValue.parseBytesSizeValue("g").toString(), is("23b"));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java b/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
new file mode 100644
index 0000000..d84107e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class DistanceUnitTests extends ElasticsearchTestCase {
+
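+ // convert(value, sourceUnit) converts value from sourceUnit into the receiving unit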
+ @Test
+ public void testSimpleDistanceUnit() {
+ assertThat(DistanceUnit.KILOMETERS.convert(10, DistanceUnit.MILES), closeTo(16.09344, 0.001));
+ assertThat(DistanceUnit.MILES.convert(10, DistanceUnit.MILES), closeTo(10, 0.001));
+ assertThat(DistanceUnit.MILES.convert(10, DistanceUnit.KILOMETERS), closeTo(6.21371192, 0.001));
+ assertThat(DistanceUnit.KILOMETERS.convert(10, DistanceUnit.KILOMETERS), closeTo(10, 0.001));
+ assertThat(DistanceUnit.KILOMETERS.convert(10, DistanceUnit.METERS), closeTo(0.01, 0.00001));
+ assertThat(DistanceUnit.KILOMETERS.convert(1000, DistanceUnit.METERS), closeTo(1, 0.001));
+ assertThat(DistanceUnit.METERS.convert(1, DistanceUnit.KILOMETERS), closeTo(1000, 0.001));
+ }
+
+ @Test
+ public void testDistanceUnitParsing() {
+ assertThat(DistanceUnit.Distance.parseDistance("50km").unit, equalTo(DistanceUnit.KILOMETERS));
+ assertThat(DistanceUnit.Distance.parseDistance("500m").unit, equalTo(DistanceUnit.METERS));
+ assertThat(DistanceUnit.Distance.parseDistance("51mi").unit, equalTo(DistanceUnit.MILES));
+ assertThat(DistanceUnit.Distance.parseDistance("52yd").unit, equalTo(DistanceUnit.YARD));
+ assertThat(DistanceUnit.Distance.parseDistance("12in").unit, equalTo(DistanceUnit.INCH));
+ assertThat(DistanceUnit.Distance.parseDistance("23mm").unit, equalTo(DistanceUnit.MILLIMETERS));
+ assertThat(DistanceUnit.Distance.parseDistance("23cm").unit, equalTo(DistanceUnit.CENTIMETERS));
+
+ double testValue = 12345.678;
+ for (DistanceUnit unit : DistanceUnit.values()) {
+ assertThat("Unit can be parsed from '" + unit.toString() + "'", DistanceUnit.fromString(unit.toString()), equalTo(unit));
+ assertThat("Unit can be parsed from '" + testValue + unit.toString() + "'", DistanceUnit.fromString(unit.toString()), equalTo(unit));
+ assertThat("Value can be parsed from '" + testValue + unit.toString() + "'", DistanceUnit.Distance.parseDistance(unit.toString(testValue)).value, equalTo(testValue));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java b/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java
new file mode 100644
index 0000000..448d052
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.common.xcontent.XContent;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.CoreMatchers.*;
+import static org.hamcrest.number.IsCloseTo.closeTo;
+
+public class FuzzinessTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNumerics() {
+ String[] options = new String[]{"1.0", "1", "1.000000"};
+ assertThat(Fuzziness.build(randomFrom(options)).asByte(), equalTo((byte) 1));
+ assertThat(Fuzziness.build(randomFrom(options)).asInt(), equalTo(1));
+ assertThat(Fuzziness.build(randomFrom(options)).asFloat(), equalTo(1f));
+ assertThat(Fuzziness.build(randomFrom(options)).asDouble(), equalTo(1d));
+ assertThat(Fuzziness.build(randomFrom(options)).asLong(), equalTo(1l));
+ assertThat(Fuzziness.build(randomFrom(options)).asShort(), equalTo((short) 1));
+ }
+
+ @Test
+ public void testParseFromXContent() throws IOException {
+ final int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+ {
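+ // a plain float value round-trips through parse()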
+ XContent xcontent = XContentType.JSON.xContent();
+ float floatValue = randomFloat();
+ String json = jsonBuilder().startObject()
+ .field(Fuzziness.X_FIELD_NAME, floatValue)
+ .endObject().string();
+ XContentParser parser = xcontent.createParser(json);
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_NUMBER));
+ Fuzziness parse = Fuzziness.parse(parser);
+ assertThat(parse.asFloat(), equalTo(floatValue));
+ assertThat(parse.asDouble(), closeTo((double) floatValue, 0.000001));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ }
+
+ {
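+ // an integer (or derived float), written as a number or a string; 0, 1 and 2 parse to the shared constants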
+ XContent xcontent = XContentType.JSON.xContent();
+ Integer intValue = frequently() ? randomIntBetween(0, 2) : randomIntBetween(0, 100);
+ Float floatRep = randomFloat();
+ Number value = intValue;
+ if (randomBoolean()) {
+ value = Float.valueOf(floatRep + intValue);
+ }
+ String json = jsonBuilder().startObject()
+ .field(Fuzziness.X_FIELD_NAME, randomBoolean() ? value.toString() : value)
+ .endObject().string();
+ XContentParser parser = xcontent.createParser(json);
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.nextToken(), anyOf(equalTo(XContentParser.Token.VALUE_NUMBER), equalTo(XContentParser.Token.VALUE_STRING)));
+ Fuzziness parse = Fuzziness.parse(parser);
+ assertThat(parse.asInt(), equalTo(intValue));
+ assertThat((int) parse.asShort(), equalTo(intValue));
+ assertThat((int) parse.asByte(), equalTo(intValue));
+ assertThat(parse.asLong(), equalTo((long) intValue));
+ if (value.intValue() >= 1) {
+ assertThat(parse.asDistance(), equalTo(Math.min(2, intValue)));
+ }
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ if (intValue.equals(value)) {
+ switch (intValue) {
+ case 1:
+ assertThat(parse, sameInstance(Fuzziness.ONE));
+ break;
+ case 2:
+ assertThat(parse, sameInstance(Fuzziness.TWO));
+ break;
+ case 0:
+ assertThat(parse, sameInstance(Fuzziness.ZERO));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ {
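+ // "AUTO" parses case-insensitively, and the toXContent output parses back to the same shared instance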
+ XContent xcontent = XContentType.JSON.xContent();
+ String json = jsonBuilder().startObject()
+ .field(Fuzziness.X_FIELD_NAME, randomBoolean() ? "AUTO" : "auto")
+ .endObject().string();
+ if (randomBoolean()) {
+ json = Fuzziness.AUTO.toXContent(jsonBuilder().startObject(), null).endObject().string();
+ }
+ XContentParser parser = xcontent.createParser(json);
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ Fuzziness parse = Fuzziness.parse(parser);
+ assertThat(parse, sameInstance(Fuzziness.AUTO));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ }
+
+ {
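+ // values with a time suffix (such as "2d" or "3ms") must be readable as a TimeValue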
+ String[] values = new String[]{"d", "H", "ms", "s", "S", "w"};
+ String actual = randomIntBetween(1, 3) + randomFrom(values);
+ XContent xcontent = XContentType.JSON.xContent();
+ String json = jsonBuilder().startObject()
+ .field(Fuzziness.X_FIELD_NAME, actual)
+ .endObject().string();
+ XContentParser parser = xcontent.createParser(json);
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ Fuzziness parse = Fuzziness.parse(parser);
+ assertThat(parse.asTimeValue(), equalTo(TimeValue.parseTimeValue(actual, null)));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ }
+ }
+
+ }
+
+ @Test
+ public void testAuto() {
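+ // AUTO allows 0 edits for terms of up to 2 code points, 1 edit for 3 to 5, and 2 edits beyond that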
+ final int codePoints = randomIntBetween(0, 10);
+ String string = randomRealisticUnicodeOfCodepointLength(codePoints);
+ if (codePoints <= 2) {
+ assertThat(Fuzziness.AUTO.asDistance(string), equalTo(0));
+ assertThat(Fuzziness.fromSimilarity(Fuzziness.AUTO.asSimilarity(string)).asDistance(string), equalTo(0));
+ } else if (codePoints > 5) {
+ assertThat(Fuzziness.AUTO.asDistance(string), equalTo(2));
+ assertThat(Fuzziness.fromSimilarity(Fuzziness.AUTO.asSimilarity(string)).asDistance(string), equalTo(2));
+ } else {
+ assertThat(Fuzziness.AUTO.asDistance(string), equalTo(1));
+ assertThat(Fuzziness.fromSimilarity(Fuzziness.AUTO.asSimilarity(string)).asDistance(string), equalTo(1));
+ }
+ assertThat(Fuzziness.AUTO.asByte(), equalTo((byte) 1));
+ assertThat(Fuzziness.AUTO.asInt(), equalTo(1));
+ assertThat(Fuzziness.AUTO.asFloat(), equalTo(1f));
+ assertThat(Fuzziness.AUTO.asDouble(), equalTo(1d));
+ assertThat(Fuzziness.AUTO.asLong(), equalTo(1l));
+ assertThat(Fuzziness.AUTO.asShort(), equalTo((short) 1));
+ assertThat(Fuzziness.AUTO.asTimeValue(), equalTo(TimeValue.parseTimeValue("1", TimeValue.timeValueMillis(1))));
+
+ }
+
+ @Test
+ public void testAsDistance() {
+ final int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+ Integer integer = Integer.valueOf(randomIntBetween(0, 10));
+ String value = "" + (randomBoolean() ? integer.intValue() : integer.floatValue());
+ assertThat(Fuzziness.build(value).asDistance(), equalTo(Math.min(2, integer.intValue())));
+ }
+ }
+
+ @Test
+ public void testSimilarityToDistance() {
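+ // the legacy similarity maps to an edit distance of roughly (1 - similarity) * length, capped at 2 edits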
+ assertThat(Fuzziness.fromSimilarity(0.5f).asDistance("ab"), equalTo(1));
+ assertThat(Fuzziness.fromSimilarity(0.66f).asDistance("abcefg"), equalTo(2));
+ assertThat(Fuzziness.fromSimilarity(0.8f).asDistance("ab"), equalTo(0));
+ assertThat(Fuzziness.fromSimilarity(0.8f).asDistance("abcefg"), equalTo(1));
+ assertThat((double) Fuzziness.ONE.asSimilarity("abcefg"), closeTo(0.8f, 0.05));
+ assertThat((double) Fuzziness.TWO.asSimilarity("abcefg"), closeTo(0.66f, 0.05));
+ assertThat((double) Fuzziness.ONE.asSimilarity("ab"), closeTo(0.5f, 0.05));
+
+ int iters = atLeast(100);
+ for (int i = 0; i < iters; i++) {
+ Fuzziness fuzziness = Fuzziness.fromEdits(between(1, 2));
+ String string = rarely() ? randomRealisticUnicodeOfLengthBetween(2, 4) :
+ randomRealisticUnicodeOfLengthBetween(4, 10);
+ float similarity = fuzziness.asSimilarity(string);
+ if (similarity != 0.0f) {
+ Fuzziness similarityBased = Fuzziness.build(similarity);
+ assertThat((double) similarityBased.asSimilarity(string), closeTo(similarity, 0.05));
+ assertThat(similarityBased.asDistance(string), equalTo(Math.min(2, fuzziness.asDistance(string))));
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java
new file mode 100644
index 0000000..6ca424a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.joda.time.PeriodType;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThan;
+
+/**
+ *
+ */
+public class TimeValueTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimple() {
+ assertThat(TimeUnit.MILLISECONDS.toMillis(10), equalTo(new TimeValue(10, TimeUnit.MILLISECONDS).millis()));
+ assertThat(TimeUnit.MICROSECONDS.toMicros(10), equalTo(new TimeValue(10, TimeUnit.MICROSECONDS).micros()));
+ assertThat(TimeUnit.SECONDS.toSeconds(10), equalTo(new TimeValue(10, TimeUnit.SECONDS).seconds()));
+ assertThat(TimeUnit.MINUTES.toMinutes(10), equalTo(new TimeValue(10, TimeUnit.MINUTES).minutes()));
+ assertThat(TimeUnit.HOURS.toHours(10), equalTo(new TimeValue(10, TimeUnit.HOURS).hours()));
+ assertThat(TimeUnit.DAYS.toDays(10), equalTo(new TimeValue(10, TimeUnit.DAYS).days()));
+ }
+
+ @Test
+ public void testToString() {
+ assertThat("10ms", equalTo(new TimeValue(10, TimeUnit.MILLISECONDS).toString()));
+ assertThat("1.5s", equalTo(new TimeValue(1533, TimeUnit.MILLISECONDS).toString()));
+ assertThat("1.5m", equalTo(new TimeValue(90, TimeUnit.SECONDS).toString()));
+ assertThat("1.5h", equalTo(new TimeValue(90, TimeUnit.MINUTES).toString()));
+ assertThat("1.5d", equalTo(new TimeValue(36, TimeUnit.HOURS).toString()));
+ assertThat("1000d", equalTo(new TimeValue(1000, TimeUnit.DAYS).toString()));
+ }
+
+ @Test
+ public void testFormat() {
+ assertThat(new TimeValue(1025, TimeUnit.MILLISECONDS).format(PeriodType.dayTime()), equalTo("1 second and 25 milliseconds"));
+ assertThat(new TimeValue(1, TimeUnit.MINUTES).format(PeriodType.dayTime()), equalTo("1 minute"));
+ assertThat(new TimeValue(65, TimeUnit.MINUTES).format(PeriodType.dayTime()), equalTo("1 hour and 5 minutes"));
+ assertThat(new TimeValue(24 * 600 + 85, TimeUnit.MINUTES).format(PeriodType.dayTime()), equalTo("241 hours and 25 minutes"));
+ }
+
+ @Test
+ public void testMinusOne() {
+ assertThat(new TimeValue(-1).nanos(), lessThan(0l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/src/test/java/org/elasticsearch/common/util/BigArraysTests.java
new file mode 100644
index 0000000..514caad
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/BigArraysTests.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.cache.recycler.MockPageCacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Arrays;
+
+public class BigArraysTests extends ElasticsearchTestCase {
+
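+ // randomly return no recycler at all so that both the recycling and non-recycling paths are exercised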
+ public static PageCacheRecycler randomCacheRecycler() {
+ return randomBoolean() ? null : new MockPageCacheRecycler(ImmutableSettings.EMPTY, new ThreadPool());
+ }
+
+ public void testByteArrayGrowth() {
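+ // grow() must preserve previously written content; mirror every write into a plain array and compare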
+ final int totalLen = randomIntBetween(1, 4000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ ByteArray array = BigArrays.newByteArray(startLen, randomCacheRecycler(), randomBoolean());
+ byte[] ref = new byte[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomByte();
+ array = BigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertEquals(ref[i], array.get(i));
+ }
+ array.release();
+ }
+
+ public void testIntArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 1000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ IntArray array = BigArrays.newIntArray(startLen, randomCacheRecycler(), randomBoolean());
+ int[] ref = new int[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomInt();
+ array = BigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertEquals(ref[i], array.get(i));
+ }
+ array.release();
+ }
+
+ public void testLongArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 1000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ LongArray array = BigArrays.newLongArray(startLen, randomCacheRecycler(), randomBoolean());
+ long[] ref = new long[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomLong();
+ array = BigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertEquals(ref[i], array.get(i));
+ }
+ array.release();
+ }
+
+ public void testDoubleArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 1000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ DoubleArray array = BigArrays.newDoubleArray(startLen, randomCacheRecycler(), randomBoolean());
+ double[] ref = new double[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomDouble();
+ array = BigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertEquals(ref[i], array.get(i), 0.001d);
+ }
+ array.release();
+ }
+
+ public void testObjectArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 1000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ ObjectArray<Object> array = BigArrays.newObjectArray(startLen, randomCacheRecycler());
+ final Object[] pool = new Object[100];
+ for (int i = 0; i < pool.length; ++i) {
+ pool[i] = new Object();
+ }
+ Object[] ref = new Object[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomFrom(pool);
+ array = BigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertSame(ref[i], array.get(i));
+ }
+ array.release();
+ }
+
+ public void testDoubleArrayFill() {
+ final int len = randomIntBetween(1, 100000);
+ final int fromIndex = randomIntBetween(0, len - 1);
+ final int toIndex = randomBoolean()
+ ? Math.min(fromIndex + randomInt(100), len) // single page
+ : randomIntBetween(fromIndex, len); // likely multiple pages
+ final DoubleArray array2 = BigArrays.newDoubleArray(len, randomCacheRecycler(), randomBoolean());
+ final double[] array1 = new double[len];
+ for (int i = 0; i < len; ++i) {
+ array1[i] = randomDouble();
+ array2.set(i, array1[i]);
+ }
+ final double rand = randomDouble();
+ Arrays.fill(array1, fromIndex, toIndex, rand);
+ array2.fill(fromIndex, toIndex, rand);
+ for (int i = 0; i < len; ++i) {
+ assertEquals(array1[i], array2.get(i), 0.001d);
+ }
+ array2.release();
+ }
+
+ public void testLongArrayFill() {
+ final int len = randomIntBetween(1, 100000);
+ final int fromIndex = randomIntBetween(0, len - 1);
+ final int toIndex = randomBoolean()
+ ? Math.min(fromIndex + randomInt(100), len) // single page
+ : randomIntBetween(fromIndex, len); // likely multiple pages
+ final LongArray array2 = BigArrays.newLongArray(len, randomCacheRecycler(), randomBoolean());
+ final long[] array1 = new long[len];
+ for (int i = 0; i < len; ++i) {
+ array1[i] = randomLong();
+ array2.set(i, array1[i]);
+ }
+ final long rand = randomLong();
+ Arrays.fill(array1, fromIndex, toIndex, rand);
+ array2.fill(fromIndex, toIndex, rand);
+ for (int i = 0; i < len; ++i) {
+ assertEquals(array1[i], array2.get(i));
+ }
+ array2.release();
+ }
+
+ public void testByteArrayBulkGet() {
+ final byte[] array1 = new byte[randomIntBetween(1, 4000000)];
+ getRandom().nextBytes(array1);
+ final ByteArray array2 = BigArrays.newByteArray(array1.length, randomCacheRecycler(), randomBoolean());
+ for (int i = 0; i < array1.length; ++i) {
+ array2.set(i, array1[i]);
+ }
+ final BytesRef ref = new BytesRef();
+ for (int i = 0; i < 1000; ++i) {
+ final int offset = randomInt(array1.length - 1);
+ final int len = randomInt(Math.min(randomBoolean() ? 10 : Integer.MAX_VALUE, array1.length - offset));
+ array2.get(offset, len, ref);
+ assertEquals(new BytesRef(array1, offset, len), ref);
+ }
+ array2.release();
+ }
+
+ public void testByteArrayBulkSet() {
+ final byte[] array1 = new byte[randomIntBetween(1, 4000000)];
+ getRandom().nextBytes(array1);
+ final ByteArray array2 = BigArrays.newByteArray(array1.length, randomCacheRecycler(), randomBoolean());
+ for (int i = 0; i < array1.length; ) {
+ final int len = Math.min(array1.length - i, randomBoolean() ? randomInt(10) : randomInt(3 * BigArrays.BYTE_PAGE_SIZE));
+ array2.set(i, array1, i, len);
+ i += len;
+ }
+ for (int i = 0; i < array1.length; ++i) {
+ assertEquals(array1[i], array2.get(i));
+ }
+ array2.release();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/util/ByteUtilsTests.java b/src/test/java/org/elasticsearch/common/util/ByteUtilsTests.java
new file mode 100644
index 0000000..7331962
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/ByteUtilsTests.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.IOException;
+
+public class ByteUtilsTests extends ElasticsearchTestCase {
+
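+ // zig-zag encoding interleaves positive and negative values so that small
+ // magnitudes encode as small non-negative numbers; the helper below
+ // round-trips a single value through encode/decode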
+ public void testZigZag(long l) {
+ assertEquals(l, ByteUtils.zigZagDecode(ByteUtils.zigZagEncode(l)));
+ }
+
+ public void testZigZag() {
+ testZigZag(0);
+ testZigZag(1);
+ testZigZag(-1);
+ testZigZag(Long.MAX_VALUE);
+ testZigZag(Long.MIN_VALUE);
+ for (int i = 0; i < 1000; ++i) {
+ testZigZag(randomLong());
+ assertTrue(ByteUtils.zigZagEncode(randomInt(1000)) >= 0);
+ assertTrue(ByteUtils.zigZagEncode(-randomInt(1000)) >= 0);
+ }
+ }
+
+ public void testFloat() throws IOException {
+ final float[] data = new float[atLeast(1000)];
+ final byte[] encoded = new byte[data.length * 4];
+ for (int i = 0; i < data.length; ++i) {
+ data[i] = randomFloat();
+ ByteUtils.writeFloatLE(data[i], encoded, i * 4);
+ }
+ for (int i = 0; i < data.length; ++i) {
+ assertEquals(data[i], ByteUtils.readFloatLE(encoded, i * 4), Float.MIN_VALUE);
+ }
+ }
+
+ public void testDouble() throws IOException {
+ final double[] data = new double[atLeast(1000)];
+ final byte[] encoded = new byte[data.length * 8];
+ for (int i = 0; i < data.length; ++i) {
+ data[i] = randomDouble();
+ ByteUtils.writeDoubleLE(data[i], encoded, i * 8);
+ }
+ for (int i = 0; i < data.length; ++i) {
+ assertEquals(data[i], ByteUtils.readDoubleLE(encoded, i * 8), Double.MIN_VALUE);
+ }
+ }
+
+ public void testVLong() throws IOException {
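+ // negative values keep the sign bit set, so the variable-length encoding always takes the maximum number of bytes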
+ final long[] data = new long[atLeast(1000)];
+ for (int i = 0; i < data.length; ++i) {
+ switch (randomInt(4)) {
+ case 0:
+ data[i] = 0;
+ break;
+ case 1:
+ data[i] = Long.MAX_VALUE;
+ break;
+ case 2:
+ data[i] = Long.MIN_VALUE;
+ break;
+ case 3:
+ data[i] = randomInt(1 << randomIntBetween(2,30));
+ break;
+ case 4:
+ data[i] = randomLong();
+ break;
+ default:
+ throw new AssertionError();
+ }
+ }
+ final byte[] encoded = new byte[ByteUtils.MAX_BYTES_VLONG * data.length];
+ ByteArrayDataOutput out = new ByteArrayDataOutput(encoded);
+ for (int i = 0; i < data.length; ++i) {
+ final int pos = out.getPosition();
+ ByteUtils.writeVLong(out, data[i]);
+ if (data[i] < 0) {
+ assertEquals(ByteUtils.MAX_BYTES_VLONG, out.getPosition() - pos);
+ }
+ }
+ final ByteArrayDataInput in = new ByteArrayDataInput(encoded);
+ for (int i = 0; i < data.length; ++i) {
+ assertEquals(data[i], ByteUtils.readVLong(in));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java b/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java
new file mode 100644
index 0000000..c541c3e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+
+public class CollectionUtilsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void rotateEmpty() {
+ assertTrue(CollectionUtils.rotate(ImmutableList.of(), randomInt()).isEmpty());
+ }
+
+ @Test
+ public void rotate() {
+ final int iters = scaledRandomIntBetween(10, 100);
+ for (int k = 0; k < iters; ++k) {
+ final int size = randomIntBetween(1, 100);
+ final int distance = randomInt();
+ List<Object> list = new ArrayList<Object>();
+ for (int i = 0; i < size; ++i) {
+ list.add(new Object());
+ }
+ final List<Object> rotated = CollectionUtils.rotate(list, distance);
+ // check content is the same
+ assertEquals(rotated.size(), list.size());
+ assertEquals(Iterables.size(rotated), list.size());
+ assertEquals(new HashSet<Object>(rotated), new HashSet<Object>(list));
+ // check stability
+ for (int j = randomInt(4); j >= 0; --j) {
+ assertEquals(rotated, CollectionUtils.rotate(list, distance));
+ }
+ // reverse: rotating back by -distance must restore the original list (skipped for Integer.MIN_VALUE, whose negation overflows)
+ if (distance != Integer.MIN_VALUE) {
+ assertEquals(list, CollectionUtils.rotate(CollectionUtils.rotate(list, distance), -distance));
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/util/SlicedDoubleListTests.java b/src/test/java/org/elasticsearch/common/util/SlicedDoubleListTests.java
new file mode 100644
index 0000000..e17725b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/SlicedDoubleListTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link SlicedDoubleList}
+ */
+public class SlicedDoubleListTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCapacity() {
+ SlicedDoubleList list = new SlicedDoubleList(5);
+ assertThat(list.length, equalTo(5));
+ assertThat(list.offset, equalTo(0));
+ assertThat(list.values.length, equalTo(5));
+ assertThat(list.size(), equalTo(5));
+
+
+ list = new SlicedDoubleList(new double[10], 5, 5);
+ assertThat(list.length, equalTo(5));
+ assertThat(list.offset, equalTo(5));
+ assertThat(list.size(), equalTo(5));
+ assertThat(list.values.length, equalTo(10));
+ }
+
+ @Test
+ public void testGrow() {
+ SlicedDoubleList list = new SlicedDoubleList(5);
+ list.length = 1000;
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((double)i);
+ }
+ int expected = 0;
+ for (Double d : list) {
+ assertThat((double)expected++, equalTo(d));
+ }
+
+ for (int i = 0; i < list.length; i++) {
+ assertThat((double)i, equalTo(list.get(i)));
+ }
+
+ int count = 0;
+ for (int i = list.offset; i < list.offset+list.length; i++) {
+ assertThat((double)count++, equalTo(list.values[i]));
+ }
+ }
+
+ @Test
+ public void testIndexOf() {
+ SlicedDoubleList list = new SlicedDoubleList(5);
+ list.length = 1000;
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((double)i%100);
+ }
+
+ assertThat(999, equalTo(list.lastIndexOf(99.0d)));
+ assertThat(99, equalTo(list.indexOf(99.0d)));
+
+ assertThat(-1, equalTo(list.lastIndexOf(100.0d)));
+ assertThat(-1, equalTo(list.indexOf(100.0d)));
+ }
+
+ @Test
+ public void testIsEmpty() {
+ SlicedDoubleList list = new SlicedDoubleList(5);
+ assertThat(false, equalTo(list.isEmpty()));
+ list.length = 0;
+ assertThat(true, equalTo(list.isEmpty()));
+ }
+
+ @Test
+ public void testSet() {
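+ // sliced lists are fixed-size views; mutating through the List interface must throw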
+ SlicedDoubleList list = new SlicedDoubleList(5);
+ try {
+ list.set(0, (double)4);
+ fail();
+ } catch (UnsupportedOperationException ex) {
+ }
+ try {
+ list.add((double)4);
+ fail();
+ } catch (UnsupportedOperationException ex) {
+ }
+ }
+
+ @Test
+ public void testToString() {
+ SlicedDoubleList list = new SlicedDoubleList(5);
+ assertThat("[0.0, 0.0, 0.0, 0.0, 0.0]", equalTo(list.toString()));
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((double)i);
+ }
+ assertThat("[0.0, 1.0, 2.0, 3.0, 4.0]", equalTo(list.toString()));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/util/SlicedLongListTests.java b/src/test/java/org/elasticsearch/common/util/SlicedLongListTests.java
new file mode 100644
index 0000000..2669501
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/SlicedLongListTests.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link SlicedLongList}
+ */
+public class SlicedLongListTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCapacity() {
+ SlicedLongList list = new SlicedLongList(5);
+ assertThat(list.length, equalTo(5));
+ assertThat(list.offset, equalTo(0));
+ assertThat(list.values.length, equalTo(5));
+ assertThat(list.size(), equalTo(5));
+
+ list = new SlicedLongList(new long[10], 5, 5);
+ assertThat(list.length, equalTo(5));
+ assertThat(list.offset, equalTo(5));
+ assertThat(list.size(), equalTo(5));
+ assertThat(list.values.length, equalTo(10));
+ }
+
+ @Test
+ public void testGrow() {
+ SlicedLongList list = new SlicedLongList(5);
+ list.length = 1000;
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((long)i);
+ }
+ int expected = 0;
+ for (Long d : list) {
+ assertThat((long)expected++, equalTo(d));
+ }
+
+ for (int i = 0; i < list.length; i++) {
+ assertThat((long)i, equalTo(list.get(i)));
+ }
+
+ int count = 0;
+ for (int i = list.offset; i < list.offset+list.length; i++) {
+ assertThat((long)count++, equalTo(list.values[i]));
+ }
+ }
+
+ @Test
+ public void testSet() {
+ SlicedLongList list = new SlicedLongList(5);
+ try {
+ list.set(0, (long)4);
+ fail();
+ } catch (UnsupportedOperationException ex) {
+ }
+ try {
+ list.add((long)4);
+ fail();
+ } catch (UnsupportedOperationException ex) {
+ }
+ }
+
+ @Test
+ public void testIndexOf() {
+ SlicedLongList list = new SlicedLongList(5);
+ list.length = 1000;
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((long)i%100);
+ }
+
+ assertThat(999, equalTo(list.lastIndexOf(99l)));
+ assertThat(99, equalTo(list.indexOf(99l)));
+
+ assertThat(-1, equalTo(list.lastIndexOf(100l)));
+ assertThat(-1, equalTo(list.indexOf(100l)));
+ }
+
+ @Test
+ public void testIsEmpty() {
+ SlicedLongList list = new SlicedLongList(5);
+ assertThat(false, equalTo(list.isEmpty()));
+ list.length = 0;
+ assertThat(true, equalTo(list.isEmpty()));
+ }
+
+ @Test
+ public void testToString() {
+ SlicedLongList list = new SlicedLongList(5);
+ assertThat("[0, 0, 0, 0, 0]", equalTo(list.toString()));
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((long)i);
+ }
+ assertThat("[0, 1, 2, 3, 4]", equalTo(list.toString()));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/util/SlicedObjectListTests.java b/src/test/java/org/elasticsearch/common/util/SlicedObjectListTests.java
new file mode 100644
index 0000000..7e98073
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/SlicedObjectListTests.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+/**
+ * Tests for {@link SlicedObjectList}
+ */
+public class SlicedObjectListTests extends ElasticsearchTestCase {
+
+ public class TestList extends SlicedObjectList<Double> {
+
+        public TestList(int capacity) {
+            this(new Double[capacity], 0, capacity);
+ }
+
+ public TestList(Double[] values, int offset, int length) {
+ super(values, offset, length);
+ }
+
+ public TestList(Double[] values) {
+ super(values);
+ }
+
+ @Override
+ public void grow(int newLength) {
+            assertThat(offset, equalTo(0)); // NOTE: growing is only meaningful for a zero-offset list
+ if (values.length >= newLength) {
+ return;
+ }
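+            // oversize via Lucene's ArrayUtil so repeated grow(i + 1) calls stay amortized O(1)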
+ final Double[] current = values;
+ values = new Double[ArrayUtil.oversize(newLength, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
+            System.arraycopy(current, 0, values, 0, current.length);
+        }
+
+ }
+ @Test
+ public void testCapacity() {
+ TestList list = new TestList(5);
+ assertThat(list.length, equalTo(5));
+ assertThat(list.offset, equalTo(0));
+ assertThat(list.values.length, equalTo(5));
+ assertThat(list.size(), equalTo(5));
+
+
+ list = new TestList(new Double[10], 5, 5);
+ assertThat(list.length, equalTo(5));
+ assertThat(list.offset, equalTo(5));
+ assertThat(list.size(), equalTo(5));
+ assertThat(list.values.length, equalTo(10));
+ }
+
+ @Test
+ public void testGrow() {
+ TestList list = new TestList(5);
+ list.length = 1000;
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((double)i);
+ }
+ int expected = 0;
+ for (Double d : list) {
+ assertThat((double)expected++, equalTo(d));
+ }
+
+ for (int i = 0; i < list.length; i++) {
+ assertThat((double)i, equalTo(list.get(i)));
+ }
+
+ int count = 0;
+ for (int i = list.offset; i < list.offset+list.length; i++) {
+ assertThat((double)count++, equalTo(list.values[i]));
+ }
+ }
+
+ @Test
+ public void testIndexOf() {
+ TestList list = new TestList(5);
+ list.length = 1000;
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((double)i%100);
+ }
+
+ assertThat(999, equalTo(list.lastIndexOf(99.0d)));
+ assertThat(99, equalTo(list.indexOf(99.0d)));
+
+ assertThat(-1, equalTo(list.lastIndexOf(100.0d)));
+ assertThat(-1, equalTo(list.indexOf(100.0d)));
+ }
+
+    @Test
+    public void testIsEmpty() {
+ TestList list = new TestList(5);
+ assertThat(false, equalTo(list.isEmpty()));
+ list.length = 0;
+ assertThat(true, equalTo(list.isEmpty()));
+ }
+
+ @Test
+ public void testSet() {
+ TestList list = new TestList(5);
+        try {
+            list.set(0, (double) 4);
+            fail("set() should not be supported on a sliced list");
+        } catch (UnsupportedOperationException ex) {
+            // expected: mutators are unsupported on a sliced view
+        }
+        try {
+            list.add((double) 4);
+            fail("add() should not be supported on a sliced list");
+        } catch (UnsupportedOperationException ex) {
+            // expected: mutators are unsupported on a sliced view
+        }
+ }
+
+ @Test
+ public void testToString() {
+ TestList list = new TestList(5);
+ assertThat("[null, null, null, null, null]", equalTo(list.toString()));
+ for (int i = 0; i < list.length; i++) {
+ list.grow(i+1);
+ list.values[i] = ((double)i);
+ }
+ assertThat("[0.0, 1.0, 2.0, 3.0, 4.0]", equalTo(list.toString()));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTest.java b/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTest.java
new file mode 100644
index 0000000..8997969
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTest.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+
+public class CountDownTest extends ElasticsearchTestCase {
+
+ @Test @Repeat(iterations = 1000)
+ public void testConcurrent() throws InterruptedException {
+ final AtomicInteger count = new AtomicInteger(0);
+ final CountDown countDown = new CountDown(atLeast(10));
+ Thread[] threads = new Thread[atLeast(3)];
+ final CountDownLatch latch = new CountDownLatch(1);
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread() {
+
+                @Override
+                public void run() {
+                    try {
+                        latch.await();
+                    } catch (InterruptedException e) {
+                        throw new RuntimeException(e);
+ }
+ while (true) {
+                    if (frequently()) {
+ if (countDown.isCountedDown()) {
+ break;
+ }
+ }
+ if (countDown.countDown()) {
+ count.incrementAndGet();
+ break;
+ }
+ }
+ }
+ };
+ threads[i].start();
+ }
+ latch.countDown();
+ Thread.yield();
+ if (rarely()) {
+ if (countDown.fastForward()) {
+ count.incrementAndGet();
+ }
+ assertThat(countDown.isCountedDown(), equalTo(true));
+ assertThat(countDown.fastForward(), equalTo(false));
+
+ }
+
+ for (Thread thread : threads) {
+ thread.join();
+ }
+ assertThat(countDown.isCountedDown(), equalTo(true));
+ assertThat(count.get(), Matchers.equalTo(1));
+ }
+
+ @Test
+ public void testSingleThreaded() {
+ int atLeast = atLeast(10);
+ final CountDown countDown = new CountDown(atLeast);
+ while(!countDown.isCountedDown()) {
+ atLeast--;
+ if (countDown.countDown()) {
+ assertThat(atLeast, equalTo(0));
+ assertThat(countDown.isCountedDown(), equalTo(true));
+ assertThat(countDown.fastForward(), equalTo(false));
+ break;
+ }
+ if (rarely()) {
+ assertThat(countDown.fastForward(), equalTo(true));
+ assertThat(countDown.isCountedDown(), equalTo(true));
+ assertThat(countDown.fastForward(), equalTo(false));
+ }
+ assertThat(atLeast, greaterThan(0));
+ }
+
+ }
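+
+    // Not part of the original file: a single-threaded sketch of the contract the concurrent
+    // test above asserts, namely that countDown() returns true exactly once, when the count
+    // reaches zero, and is a no-op afterwards.
+    @Test
+    public void testCountDownContractSketch() {
+        CountDown countDown = new CountDown(3);
+        assertThat(countDown.countDown(), equalTo(false));
+        assertThat(countDown.countDown(), equalTo(false));
+        assertThat(countDown.countDown(), equalTo(true));
+        assertThat(countDown.isCountedDown(), equalTo(true));
+        assertThat(countDown.countDown(), equalTo(false));
+    }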
+}
diff --git a/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java
new file mode 100644
index 0000000..d4d46db
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThan;
+
+/**
+ */
+public class EsExecutorsTests extends ElasticsearchTestCase {
+
+ private TimeUnit randomTimeUnit() {
+ return TimeUnit.values()[between(0, TimeUnit.values().length - 1)];
+ }
+
+ @Test
+ public void testFixedForcedExecution() throws Exception {
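+        // one worker with a bounded queue, as the behavior below implies: the first task occupies
+        // the worker, the second fills the queue, and the third is accepted only because it forces execution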
+ EsThreadPoolExecutor executor = EsExecutors.newFixed(1, 1, EsExecutors.daemonThreadFactory("test"));
+ final CountDownLatch wait = new CountDownLatch(1);
+
+ final CountDownLatch exec1Wait = new CountDownLatch(1);
+ final AtomicBoolean executed1 = new AtomicBoolean();
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ wait.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ executed1.set(true);
+ exec1Wait.countDown();
+ }
+ });
+
+ final CountDownLatch exec2Wait = new CountDownLatch(1);
+ final AtomicBoolean executed2 = new AtomicBoolean();
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ executed2.set(true);
+ exec2Wait.countDown();
+ }
+ });
+
+ final AtomicBoolean executed3 = new AtomicBoolean();
+ final CountDownLatch exec3Wait = new CountDownLatch(1);
+ executor.execute(new AbstractRunnable() {
+ @Override
+ public void run() {
+ executed3.set(true);
+ exec3Wait.countDown();
+ }
+
+ @Override
+ public boolean isForceExecution() {
+ return true;
+ }
+ });
+
+ wait.countDown();
+
+ exec1Wait.await();
+ exec2Wait.await();
+ exec3Wait.await();
+
+ assertThat(executed1.get(), equalTo(true));
+ assertThat(executed2.get(), equalTo(true));
+ assertThat(executed3.get(), equalTo(true));
+
+ executor.shutdownNow();
+ }
+
+ @Test
+ public void testFixedRejected() throws Exception {
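+        // same pool shape as above: one worker plus a bounded queue, so a third plain task must be rejected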
+ EsThreadPoolExecutor executor = EsExecutors.newFixed(1, 1, EsExecutors.daemonThreadFactory("test"));
+ final CountDownLatch wait = new CountDownLatch(1);
+
+ final CountDownLatch exec1Wait = new CountDownLatch(1);
+ final AtomicBoolean executed1 = new AtomicBoolean();
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ wait.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ executed1.set(true);
+ exec1Wait.countDown();
+ }
+ });
+
+ final CountDownLatch exec2Wait = new CountDownLatch(1);
+ final AtomicBoolean executed2 = new AtomicBoolean();
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ executed2.set(true);
+ exec2Wait.countDown();
+ }
+ });
+
+ final AtomicBoolean executed3 = new AtomicBoolean();
+ try {
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ executed3.set(true);
+ }
+ });
+ fail("should be rejected...");
+ } catch (EsRejectedExecutionException e) {
+ // all is well
+ }
+
+ wait.countDown();
+
+ exec1Wait.await();
+ exec2Wait.await();
+
+ assertThat(executed1.get(), equalTo(true));
+ assertThat(executed2.get(), equalTo(true));
+ assertThat(executed3.get(), equalTo(false));
+
+ executor.shutdownNow();
+ }
+
+ @Test
+ public void testScaleUp() throws Exception {
+ final int min = between(1, 3);
+ final int max = between(min + 1, 6);
+ final ThreadBarrier barrier = new ThreadBarrier(max + 1);
+
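+        // a scaling pool starts at the core (min) size and adds threads toward max as tasks arrive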
+ ThreadPoolExecutor pool = EsExecutors.newScaling(min, max, between(1, 100), randomTimeUnit(), EsExecutors.daemonThreadFactory("test"));
+ assertThat("Min property", pool.getCorePoolSize(), equalTo(min));
+ assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max));
+
+ for (int i = 0; i < max; ++i) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ pool.execute(new Runnable() {
+ public void run() {
+ latch.countDown();
+ try {
+ barrier.await();
+ barrier.await();
+ } catch (Throwable e) {
+ barrier.reset(e);
+ }
+ }
+ });
+
+            // wait until a thread actually executes this task;
+            // otherwise it might merely be queued and no new thread would be created
+ latch.await();
+ }
+
+ barrier.await();
+ assertThat("wrong pool size", pool.getPoolSize(), equalTo(max));
+ assertThat("wrong active size", pool.getActiveCount(), equalTo(max));
+ barrier.await();
+ pool.shutdown();
+ }
+
+ @Test
+ public void testScaleDown() throws Exception {
+ final int min = between(1, 3);
+ final int max = between(min + 1, 6);
+ final ThreadBarrier barrier = new ThreadBarrier(max + 1);
+
+ final ThreadPoolExecutor pool = EsExecutors.newScaling(min, max, between(1, 100), TimeUnit.MILLISECONDS, EsExecutors.daemonThreadFactory("test"));
+ assertThat("Min property", pool.getCorePoolSize(), equalTo(min));
+ assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max));
+
+ for (int i = 0; i < max; ++i) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ pool.execute(new Runnable() {
+ public void run() {
+ latch.countDown();
+ try {
+ barrier.await();
+ barrier.await();
+ } catch (Throwable e) {
+ barrier.reset(e);
+ }
+ }
+ });
+
+            // wait until a thread actually executes this task;
+            // otherwise it might merely be queued and no new thread would be created
+ latch.await();
+ }
+
+ barrier.await();
+ assertThat("wrong pool size", pool.getPoolSize(), equalTo(max));
+ assertThat("wrong active size", pool.getActiveCount(), equalTo(max));
+ barrier.await();
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ return pool.getActiveCount() == 0 && pool.getPoolSize() < max;
+ }
+ });
+ //assertThat("not all tasks completed", pool.getCompletedTaskCount(), equalTo((long) max));
+ assertThat("wrong active count", pool.getActiveCount(), equalTo(0));
+ //assertThat("wrong pool size. ", min, equalTo(pool.getPoolSize())); //BUG in ThreadPool - Bug ID: 6458662
+ //assertThat("idle threads didn't stay above min (" + pool.getPoolSize() + ")", pool.getPoolSize(), greaterThan(0));
+ assertThat("idle threads didn't shrink below max. (" + pool.getPoolSize() + ")", pool.getPoolSize(), lessThan(max));
+ pool.shutdown();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java b/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java
new file mode 100644
index 0000000..bd17217
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java
@@ -0,0 +1,277 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class PrioritizedExecutorsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testPriorityQueue() throws Exception {
+ PriorityBlockingQueue<Priority> queue = new PriorityBlockingQueue<Priority>();
+ queue.add(Priority.LANGUID);
+ queue.add(Priority.NORMAL);
+ queue.add(Priority.HIGH);
+ queue.add(Priority.LOW);
+ queue.add(Priority.URGENT);
+
+ assertThat(queue.poll(), equalTo(Priority.URGENT));
+ assertThat(queue.poll(), equalTo(Priority.HIGH));
+ assertThat(queue.poll(), equalTo(Priority.NORMAL));
+ assertThat(queue.poll(), equalTo(Priority.LOW));
+ assertThat(queue.poll(), equalTo(Priority.LANGUID));
+ }
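+
+    // Not part of the original file: a sketch of the ordering the "(fifo)" comments in the tests
+    // below rely on; the explicit sequence-number tiebreaker is an assumption for illustration,
+    // not the actual PrioritizedEsThreadPoolExecutor implementation.
+    @Test
+    public void testPriorityThenFifoOrderingSketch() {
+        class Entry implements Comparable<Entry> {
+            final Priority priority;
+            final long seq; // insertion order, used to keep FIFO within one priority
+            Entry(Priority priority, long seq) { this.priority = priority; this.seq = seq; }
+            @Override
+            public int compareTo(Entry other) {
+                int byPriority = priority.compareTo(other.priority);
+                return byPriority != 0 ? byPriority : Long.signum(seq - other.seq);
+            }
+        }
+        PriorityBlockingQueue<Entry> queue = new PriorityBlockingQueue<Entry>();
+        queue.add(new Entry(Priority.LOW, 0));
+        queue.add(new Entry(Priority.HIGH, 1));
+        queue.add(new Entry(Priority.LOW, 2));
+        assertThat(queue.poll().seq, equalTo(1L)); // HIGH first
+        assertThat(queue.poll().seq, equalTo(0L)); // then the first LOW submitted (fifo)
+        assertThat(queue.poll().seq, equalTo(2L));
+    }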
+
+ @Test
+ public void testSubmitPrioritizedExecutorWithRunnables() throws Exception {
+ ExecutorService executor = EsExecutors.newSinglePrioritizing(Executors.defaultThreadFactory());
+ List<Integer> results = new ArrayList<Integer>(7);
+ CountDownLatch awaitingLatch = new CountDownLatch(1);
+ CountDownLatch finishedLatch = new CountDownLatch(7);
+ executor.submit(new AwaitingJob(awaitingLatch));
+ executor.submit(new Job(6, Priority.LANGUID, results, finishedLatch));
+ executor.submit(new Job(4, Priority.LOW, results, finishedLatch));
+ executor.submit(new Job(1, Priority.HIGH, results, finishedLatch));
+ executor.submit(new Job(5, Priority.LOW, results, finishedLatch)); // will execute after the first LOW (fifo)
+ executor.submit(new Job(0, Priority.URGENT, results, finishedLatch));
+ executor.submit(new Job(3, Priority.NORMAL, results, finishedLatch));
+ executor.submit(new Job(2, Priority.HIGH, results, finishedLatch)); // will execute after the first HIGH (fifo)
+ awaitingLatch.countDown();
+ finishedLatch.await();
+
+ assertThat(results.size(), equalTo(7));
+ assertThat(results.get(0), equalTo(0));
+ assertThat(results.get(1), equalTo(1));
+ assertThat(results.get(2), equalTo(2));
+ assertThat(results.get(3), equalTo(3));
+ assertThat(results.get(4), equalTo(4));
+ assertThat(results.get(5), equalTo(5));
+ assertThat(results.get(6), equalTo(6));
+ }
+
+ @Test
+ public void testExecutePrioritizedExecutorWithRunnables() throws Exception {
+ ExecutorService executor = EsExecutors.newSinglePrioritizing(Executors.defaultThreadFactory());
+ List<Integer> results = new ArrayList<Integer>(7);
+ CountDownLatch awaitingLatch = new CountDownLatch(1);
+ CountDownLatch finishedLatch = new CountDownLatch(7);
+ executor.execute(new AwaitingJob(awaitingLatch));
+ executor.execute(new Job(6, Priority.LANGUID, results, finishedLatch));
+ executor.execute(new Job(4, Priority.LOW, results, finishedLatch));
+ executor.execute(new Job(1, Priority.HIGH, results, finishedLatch));
+ executor.execute(new Job(5, Priority.LOW, results, finishedLatch)); // will execute after the first LOW (fifo)
+ executor.execute(new Job(0, Priority.URGENT, results, finishedLatch));
+ executor.execute(new Job(3, Priority.NORMAL, results, finishedLatch));
+ executor.execute(new Job(2, Priority.HIGH, results, finishedLatch)); // will execute after the first HIGH (fifo)
+ awaitingLatch.countDown();
+ finishedLatch.await();
+
+ assertThat(results.size(), equalTo(7));
+ assertThat(results.get(0), equalTo(0));
+ assertThat(results.get(1), equalTo(1));
+ assertThat(results.get(2), equalTo(2));
+ assertThat(results.get(3), equalTo(3));
+ assertThat(results.get(4), equalTo(4));
+ assertThat(results.get(5), equalTo(5));
+ assertThat(results.get(6), equalTo(6));
+ }
+
+ @Test
+ public void testSubmitPrioritizedExecutorWithCallables() throws Exception {
+ ExecutorService executor = EsExecutors.newSinglePrioritizing(Executors.defaultThreadFactory());
+ List<Integer> results = new ArrayList<Integer>(7);
+ CountDownLatch awaitingLatch = new CountDownLatch(1);
+ CountDownLatch finishedLatch = new CountDownLatch(7);
+ executor.submit(new AwaitingJob(awaitingLatch));
+ executor.submit(new CallableJob(6, Priority.LANGUID, results, finishedLatch));
+ executor.submit(new CallableJob(4, Priority.LOW, results, finishedLatch));
+ executor.submit(new CallableJob(1, Priority.HIGH, results, finishedLatch));
+ executor.submit(new CallableJob(5, Priority.LOW, results, finishedLatch)); // will execute after the first LOW (fifo)
+ executor.submit(new CallableJob(0, Priority.URGENT, results, finishedLatch));
+ executor.submit(new CallableJob(3, Priority.NORMAL, results, finishedLatch));
+ executor.submit(new CallableJob(2, Priority.HIGH, results, finishedLatch)); // will execute after the first HIGH (fifo)
+ awaitingLatch.countDown();
+ finishedLatch.await();
+
+ assertThat(results.size(), equalTo(7));
+ assertThat(results.get(0), equalTo(0));
+ assertThat(results.get(1), equalTo(1));
+ assertThat(results.get(2), equalTo(2));
+ assertThat(results.get(3), equalTo(3));
+ assertThat(results.get(4), equalTo(4));
+ assertThat(results.get(5), equalTo(5));
+ assertThat(results.get(6), equalTo(6));
+ }
+
+ @Test
+ public void testSubmitPrioritizedExecutorWithMixed() throws Exception {
+ ExecutorService executor = EsExecutors.newSinglePrioritizing(Executors.defaultThreadFactory());
+ List<Integer> results = new ArrayList<Integer>(7);
+ CountDownLatch awaitingLatch = new CountDownLatch(1);
+ CountDownLatch finishedLatch = new CountDownLatch(7);
+ executor.submit(new AwaitingJob(awaitingLatch));
+ executor.submit(new CallableJob(6, Priority.LANGUID, results, finishedLatch));
+ executor.submit(new Job(4, Priority.LOW, results, finishedLatch));
+ executor.submit(new CallableJob(1, Priority.HIGH, results, finishedLatch));
+ executor.submit(new Job(5, Priority.LOW, results, finishedLatch)); // will execute after the first LOW (fifo)
+ executor.submit(new CallableJob(0, Priority.URGENT, results, finishedLatch));
+ executor.submit(new Job(3, Priority.NORMAL, results, finishedLatch));
+ executor.submit(new CallableJob(2, Priority.HIGH, results, finishedLatch)); // will execute after the first HIGH (fifo)
+ awaitingLatch.countDown();
+ finishedLatch.await();
+
+ assertThat(results.size(), equalTo(7));
+ assertThat(results.get(0), equalTo(0));
+ assertThat(results.get(1), equalTo(1));
+ assertThat(results.get(2), equalTo(2));
+ assertThat(results.get(3), equalTo(3));
+ assertThat(results.get(4), equalTo(4));
+ assertThat(results.get(5), equalTo(5));
+ assertThat(results.get(6), equalTo(6));
+ }
+
+ @Test
+ public void testTimeout() throws Exception {
+ ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
+ PrioritizedEsThreadPoolExecutor executor = EsExecutors.newSinglePrioritizing(Executors.defaultThreadFactory());
+ final CountDownLatch block = new CountDownLatch(1);
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ block.await();
+ } catch (InterruptedException e) {
+ fail();
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "the blocking";
+ }
+ });
+
+ final AtomicBoolean executeCalled = new AtomicBoolean();
+ final CountDownLatch timedOut = new CountDownLatch(1);
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ executeCalled.set(true);
+ }
+
+ @Override
+ public String toString() {
+ return "the waiting";
+ }
+ }, timer, TimeValue.timeValueMillis(100) /* enough timeout to catch them in the pending list... */, new Runnable() {
+ @Override
+ public void run() {
+ timedOut.countDown();
+ }
+ }
+ );
+
+ PrioritizedEsThreadPoolExecutor.Pending[] pending = executor.getPending();
+ assertThat(pending.length, equalTo(1));
+ assertThat(pending[0].task.toString(), equalTo("the waiting"));
+
+ assertThat(timedOut.await(2, TimeUnit.SECONDS), equalTo(true));
+ block.countDown();
+        Thread.sleep(100); // give the executor a moment: execute() on the timed-out task must never be called
+ assertThat(executeCalled.get(), equalTo(false));
+
+ timer.shutdownNow();
+ executor.shutdownNow();
+ }
+
+ static class AwaitingJob extends PrioritizedRunnable {
+
+ private final CountDownLatch latch;
+
+ private AwaitingJob(CountDownLatch latch) {
+ super(Priority.URGENT);
+ this.latch = latch;
+ }
+
+ @Override
+ public void run() {
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
+
+ static class Job extends PrioritizedRunnable {
+
+ private final int result;
+ private final List<Integer> results;
+ private final CountDownLatch latch;
+
+ Job(int result, Priority priority, List<Integer> results, CountDownLatch latch) {
+ super(priority);
+ this.result = result;
+ this.results = results;
+ this.latch = latch;
+ }
+
+ @Override
+ public void run() {
+ results.add(result);
+ latch.countDown();
+ }
+ }
+
+ static class CallableJob extends PrioritizedCallable<Integer> {
+
+ private final int result;
+ private final List<Integer> results;
+ private final CountDownLatch latch;
+
+ CallableJob(int result, Priority priority, List<Integer> results, CountDownLatch latch) {
+ super(priority);
+ this.result = result;
+ this.results = results;
+ this.latch = latch;
+ }
+
+ @Override
+ public Integer call() throws Exception {
+ results.add(result);
+ latch.countDown();
+ return result;
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/xcontent/builder/BuilderRawFieldTests.java b/src/test/java/org/elasticsearch/common/xcontent/builder/BuilderRawFieldTests.java
new file mode 100644
index 0000000..c8911c6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/xcontent/builder/BuilderRawFieldTests.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.builder;
+
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class BuilderRawFieldTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testJsonRawField() throws IOException {
+ testRawField(XContentType.JSON);
+ }
+
+ @Test
+ public void testSmileRawField() throws IOException {
+ testRawField(XContentType.SMILE);
+ }
+
+ @Test
+ public void testYamlRawField() throws IOException {
+ testRawField(XContentType.YAML);
+ }
+
+ private void testRawField(XContentType type) throws IOException {
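+        // build a document that interleaves regular fields with raw, pre-encoded payloads, then re-parse it token by token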
+ XContentBuilder builder = XContentFactory.contentBuilder(type);
+ builder.startObject();
+ builder.field("field1", "value1");
+ builder.rawField("_source", XContentFactory.contentBuilder(type).startObject().field("s_field", "s_value").endObject().bytes());
+ builder.field("field2", "value2");
+ builder.rawField("payload_i", new BytesArray(Long.toString(1)));
+ builder.field("field3", "value3");
+ builder.rawField("payload_d", new BytesArray(Double.toString(1.1)));
+ builder.field("field4", "value4");
+ builder.rawField("payload_s", new BytesArray("test"));
+ builder.field("field5", "value5");
+ builder.endObject();
+
+ XContentParser parser = XContentFactory.xContent(type).createParser(builder.bytes());
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field1"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value1"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("_source"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("s_field"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("s_value"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field2"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value2"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("payload_i"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_NUMBER));
+ assertThat(parser.numberType(), equalTo(XContentParser.NumberType.INT));
+        assertThat(parser.longValue(), equalTo(1L));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field3"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value3"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("payload_d"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_NUMBER));
+ assertThat(parser.numberType(), equalTo(XContentParser.NumberType.DOUBLE));
+ assertThat(parser.doubleValue(), equalTo(1.1d));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field4"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value4"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("payload_s"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("test"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field5"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value5"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
new file mode 100644
index 0000000..67bb99f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.builder;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.io.FastCharArrayWriter;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentGenerator;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConversion.CAMELCASE;
+import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConversion.UNDERSCORE;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class XContentBuilderTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testPrettyWithLfAtEnd() throws Exception {
+ FastCharArrayWriter writer = new FastCharArrayWriter();
+ XContentGenerator generator = XContentFactory.xContent(XContentType.JSON).createGenerator(writer);
+ generator.usePrettyPrint();
+ generator.usePrintLineFeedAtEnd();
+
+ generator.writeStartObject();
+ generator.writeStringField("test", "value");
+ generator.writeEndObject();
+ generator.flush();
+
+ generator.close();
+ // double close, and check there is no error...
+ generator.close();
+
+ assertThat(writer.unsafeCharArray()[writer.size() - 1], equalTo('\n'));
+ }
+
+ @Test
+ public void verifyReuseJsonGenerator() throws Exception {
+ FastCharArrayWriter writer = new FastCharArrayWriter();
+ XContentGenerator generator = XContentFactory.xContent(XContentType.JSON).createGenerator(writer);
+ generator.writeStartObject();
+ generator.writeStringField("test", "value");
+ generator.writeEndObject();
+ generator.flush();
+
+ assertThat(writer.toStringTrim(), equalTo("{\"test\":\"value\"}"));
+
+ // try again...
+ writer.reset();
+ generator.writeStartObject();
+ generator.writeStringField("test", "value");
+ generator.writeEndObject();
+ generator.flush();
+        // the reused generator prepends a space because it no longer believes it is at the root object;
+        // toStringTrim() strips it, mirroring what the real code does
+ assertThat(writer.toStringTrim(), equalTo("{\"test\":\"value\"}"));
+ }
+
+ @Test
+ public void testSimpleGenerator() throws Exception {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("test", "value").endObject();
+ assertThat(builder.string(), equalTo("{\"test\":\"value\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("test", "value").endObject();
+ assertThat(builder.string(), equalTo("{\"test\":\"value\"}"));
+ }
+
+ @Test
+ public void testOverloadedList() throws Exception {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("test", Lists.newArrayList("1", "2")).endObject();
+ assertThat(builder.string(), equalTo("{\"test\":[\"1\",\"2\"]}"));
+ }
+
+ @Test
+ public void testWritingBinaryToStream() throws Exception {
+ BytesStreamOutput bos = new BytesStreamOutput();
+
+ XContentGenerator gen = XContentFactory.xContent(XContentType.JSON).createGenerator(bos);
+ gen.writeStartObject();
+ gen.writeStringField("name", "something");
+ gen.flush();
+ bos.write(", source : { test : \"value\" }".getBytes("UTF8"));
+ gen.writeStringField("name2", "something2");
+ gen.writeEndObject();
+ gen.close();
+
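+        // no assertions beyond this point: the test only verifies that raw bytes can be spliced into the stream without the generator throwing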
+ byte[] data = bos.bytes().toBytes();
+ String sData = new String(data, "UTF8");
+ System.out.println("DATA: " + sData);
+ }
+
+ @Test
+ public void testFieldCaseConversion() throws Exception {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).fieldCaseConversion(CAMELCASE);
+ builder.startObject().field("test_name", "value").endObject();
+ assertThat(builder.string(), equalTo("{\"testName\":\"value\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON).fieldCaseConversion(UNDERSCORE);
+ builder.startObject().field("testName", "value").endObject();
+ assertThat(builder.string(), equalTo("{\"test_name\":\"value\"}"));
+ }
+
+ @Test
+ public void testDateTypesConversion() throws Exception {
+ Date date = new Date();
+ String expectedDate = XContentBuilder.defaultDatePrinter.print(date.getTime());
+ Calendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"), Locale.ROOT);
+ String expectedCalendar = XContentBuilder.defaultDatePrinter.print(calendar.getTimeInMillis());
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("date", date).endObject();
+ assertThat(builder.string(), equalTo("{\"date\":\"" + expectedDate + "\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("calendar", calendar).endObject();
+ assertThat(builder.string(), equalTo("{\"calendar\":\"" + expectedCalendar + "\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON);
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("date", date);
+ builder.map(map);
+ assertThat(builder.string(), equalTo("{\"date\":\"" + expectedDate + "\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON);
+ map = new HashMap<String, Object>();
+ map.put("calendar", calendar);
+ builder.map(map);
+ assertThat(builder.string(), equalTo("{\"calendar\":\"" + expectedCalendar + "\"}"));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java b/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java
new file mode 100644
index 0000000..0a57adf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.smile;
+
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentGenerator;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class JsonVsSmileTests extends ElasticsearchTestCase {
+
+// @Test public void testBinarySmileField() throws Exception {
+// JsonGenerator gen = new SmileFactory().createJsonGenerator(new ByteArrayOutputStream());
+//// JsonGenerator gen = new JsonFactory().createJsonGenerator(new ByteArrayOutputStream(), JsonEncoding.UTF8);
+// gen.writeStartObject();
+// gen.writeFieldName("field1");
+// gen.writeBinary(new byte[]{1, 2, 3});
+// gen.writeEndObject();
+// }
+
+ @Test
+ public void compareParsingTokens() throws IOException {
+ BytesStreamOutput xsonOs = new BytesStreamOutput();
+ XContentGenerator xsonGen = XContentFactory.xContent(XContentType.SMILE).createGenerator(xsonOs);
+
+ BytesStreamOutput jsonOs = new BytesStreamOutput();
+ XContentGenerator jsonGen = XContentFactory.xContent(XContentType.JSON).createGenerator(jsonOs);
+
+ xsonGen.writeStartObject();
+ jsonGen.writeStartObject();
+
+ xsonGen.writeStringField("test", "value");
+ jsonGen.writeStringField("test", "value");
+
+ xsonGen.writeArrayFieldStart("arr");
+ jsonGen.writeArrayFieldStart("arr");
+ xsonGen.writeNumber(1);
+ jsonGen.writeNumber(1);
+ xsonGen.writeNull();
+ jsonGen.writeNull();
+ xsonGen.writeEndArray();
+ jsonGen.writeEndArray();
+
+ xsonGen.writeEndObject();
+ jsonGen.writeEndObject();
+
+ xsonGen.close();
+ jsonGen.close();
+
+ verifySameTokens(XContentFactory.xContent(XContentType.JSON).createParser(jsonOs.bytes().toBytes()), XContentFactory.xContent(XContentType.SMILE).createParser(xsonOs.bytes().toBytes()));
+ }
+
+ private void verifySameTokens(XContentParser parser1, XContentParser parser2) throws IOException {
+ while (true) {
+ XContentParser.Token token1 = parser1.nextToken();
+ XContentParser.Token token2 = parser2.nextToken();
+ if (token1 == null) {
+ assertThat(token2, nullValue());
+ return;
+ }
+ assertThat(token1, equalTo(token2));
+ switch (token1) {
+ case FIELD_NAME:
+ assertThat(parser1.currentName(), equalTo(parser2.currentName()));
+ break;
+ case VALUE_STRING:
+ assertThat(parser1.text(), equalTo(parser2.text()));
+ break;
+ case VALUE_NUMBER:
+ assertThat(parser1.numberType(), equalTo(parser2.numberType()));
+ assertThat(parser1.numberValue(), equalTo(parser2.numberValue()));
+ break;
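+                default:
+                    // start/end markers and VALUE_NULL carry no payload beyond the token itself
+                    break;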
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java b/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java
new file mode 100644
index 0000000..0f9e4ba
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support;
+
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class XContentHelperTests extends ElasticsearchTestCase {
+
+ Map<String, Object> getMap(Object... keyValues) {
+ Map<String, Object> map = new HashMap<String, Object>();
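+        // keyValues arrive as (key, value) pairs; ++i consumes the value so each iteration advances by two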
+ for (int i = 0; i < keyValues.length; i++) {
+ map.put((String) keyValues[i], keyValues[++i]);
+ }
+ return map;
+ }
+
+ Map<String, Object> getNamedMap(String name, Object... keyValues) {
+ Map<String, Object> map = getMap(keyValues);
+
+ Map<String, Object> namedMap = new HashMap<String, Object>(1);
+ namedMap.put(name, map);
+ return namedMap;
+ }
+
+ List<Object> getList(Object... values) {
+ return Arrays.asList(values);
+ }
+
+ @Test
+ public void testMergingListValuesAreMapsOfOne() {
+
+ Map<String, Object> defaults = getMap("test", getList(getNamedMap("name1", "t1", "1"), getNamedMap("name2", "t2", "2")));
+ Map<String, Object> content = getMap("test", getList(getNamedMap("name2", "t3", "3"), getNamedMap("name4", "t4", "4")));
+ Map<String, Object> expected = getMap("test",
+ getList(getNamedMap("name2", "t2", "2", "t3", "3"), getNamedMap("name4", "t4", "4"), getNamedMap("name1", "t1", "1")));
+
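+        // mergeDefaults matches list elements by their single map key: "name2" appears in both and
+        // is merged, while defaults-only "name1" and content-only "name4" are carried over whole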
+ XContentHelper.mergeDefaults(content, defaults);
+
+ assertThat(content, Matchers.equalTo(expected));
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java b/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java
new file mode 100644
index 0000000..e2fba5a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java
@@ -0,0 +1,456 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+/**
+ */
+public class XContentMapValuesTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testFilter() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .field("something_else", "value3")
+ .endObject();
+
+ Map<String, Object> source = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ Map<String, Object> filter = XContentMapValues.filter(source, new String[]{"test1"}, Strings.EMPTY_ARRAY);
+ assertThat(filter.size(), equalTo(1));
+ assertThat(filter.get("test1").toString(), equalTo("value1"));
+
+ filter = XContentMapValues.filter(source, new String[]{"test*"}, Strings.EMPTY_ARRAY);
+ assertThat(filter.size(), equalTo(2));
+ assertThat(filter.get("test1").toString(), equalTo("value1"));
+ assertThat(filter.get("test2").toString(), equalTo("value2"));
+
+ filter = XContentMapValues.filter(source, Strings.EMPTY_ARRAY, new String[]{"test1"});
+ assertThat(filter.size(), equalTo(2));
+ assertThat(filter.get("test2").toString(), equalTo("value2"));
+ assertThat(filter.get("something_else").toString(), equalTo("value3"));
+
+ // more complex object...
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1")
+ .startArray("path2")
+ .startObject().field("test", "value1").endObject()
+ .startObject().field("test", "value2").endObject()
+ .endArray()
+ .endObject()
+ .field("test1", "value1")
+ .endObject();
+
+ source = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ filter = XContentMapValues.filter(source, new String[]{"path1"}, Strings.EMPTY_ARRAY);
+ assertThat(filter.size(), equalTo(1));
+
+ filter = XContentMapValues.filter(source, new String[]{"path1*"}, Strings.EMPTY_ARRAY);
+ assertThat(filter.get("path1"), equalTo(source.get("path1")));
+ assertThat(filter.containsKey("test1"), equalTo(false));
+
+ filter = XContentMapValues.filter(source, new String[]{"test1*"}, Strings.EMPTY_ARRAY);
+ assertThat(filter.get("test1"), equalTo(source.get("test1")));
+ assertThat(filter.containsKey("path1"), equalTo(false));
+
+ filter = XContentMapValues.filter(source, new String[]{"path1.path2.*"}, Strings.EMPTY_ARRAY);
+ assertThat(filter.get("path1"), equalTo(source.get("path1")));
+ assertThat(filter.containsKey("test1"), equalTo(false));
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Test
+ public void testExtractValue() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .field("test", "value")
+ .endObject();
+
+ Map<String, Object> map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractValue("test", map).toString(), equalTo("value"));
+ assertThat(XContentMapValues.extractValue("test.me", map), nullValue());
+ assertThat(XContentMapValues.extractValue("something.else.2", map), nullValue());
+
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1").startObject("path2").field("test", "value").endObject().endObject()
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractValue("path1.path2.test", map).toString(), equalTo("value"));
+ assertThat(XContentMapValues.extractValue("path1.path2.test_me", map), nullValue());
+ assertThat(XContentMapValues.extractValue("path1.non_path2.test", map), nullValue());
+
+ Object extValue = XContentMapValues.extractValue("path1.path2", map);
+ assertThat(extValue, instanceOf(Map.class));
+ Map<String, Object> extMapValue = (Map<String, Object>) extValue;
+ assertThat(extMapValue, hasEntry("test", (Object) "value"));
+
+ extValue = XContentMapValues.extractValue("path1", map);
+ assertThat(extValue, instanceOf(Map.class));
+ extMapValue = (Map<String, Object>) extValue;
+ assertThat(extMapValue.containsKey("path2"), equalTo(true));
+
+ // lists
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1").field("test", "value1", "value2").endObject()
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+
+ extValue = XContentMapValues.extractValue("path1.test", map);
+ assertThat(extValue, instanceOf(List.class));
+
+ List extListValue = (List) extValue;
+ assertThat(extListValue.size(), equalTo(2));
+
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1")
+ .startArray("path2")
+ .startObject().field("test", "value1").endObject()
+ .startObject().field("test", "value2").endObject()
+ .endArray()
+ .endObject()
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+
+ extValue = XContentMapValues.extractValue("path1.path2.test", map);
+ assertThat(extValue, instanceOf(List.class));
+
+ extListValue = (List) extValue;
+ assertThat(extListValue.size(), equalTo(2));
+ assertThat(extListValue.get(0).toString(), equalTo("value1"));
+ assertThat(extListValue.get(1).toString(), equalTo("value2"));
+
+ // fields with . in them
+ builder = XContentFactory.jsonBuilder().startObject()
+ .field("xxx.yyy", "value")
+ .endObject();
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractValue("xxx.yyy", map).toString(), equalTo("value"));
+
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1.xxx").startObject("path2.yyy").field("test", "value").endObject().endObject()
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractValue("path1.xxx.path2.yyy.test", map).toString(), equalTo("value"));
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Test
+ public void testExtractRawValue() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .field("test", "value")
+ .endObject();
+
+ Map<String, Object> map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractRawValues("test", map).get(0).toString(), equalTo("value"));
+
+ builder = XContentFactory.jsonBuilder().startObject()
+ .field("test.me", "value")
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractRawValues("test.me", map).get(0).toString(), equalTo("value"));
+
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1").startObject("path2").field("test", "value").endObject().endObject()
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractRawValues("path1.path2.test", map).get(0).toString(), equalTo("value"));
+
+ builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("path1.xxx").startObject("path2.yyy").field("test", "value").endObject().endObject()
+ .endObject();
+
+ map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+ assertThat(XContentMapValues.extractRawValues("path1.xxx.path2.yyy.test", map).get(0).toString(), equalTo("value"));
+ }
+
+ @Test
+ public void prefixedNamesFilteringTest() {
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("obj", "value");
+ map.put("obj_name", "value_name");
+        Map<String, Object> filteredMap = XContentMapValues.filter(map, new String[]{"obj_name"}, Strings.EMPTY_ARRAY);
+        assertThat(filteredMap.size(), equalTo(1));
+        assertThat((String) filteredMap.get("obj_name"), equalTo("value_name"));
+ }
+
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void nestedFilteringTest() {
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("field", "value");
+ map.put("array",
+ Arrays.asList(
+ 1,
+ new HashMap<String, Object>() {{
+ put("nested", 2);
+ put("nested_2", 3);
+ }}));
+        Map<String, Object> filteredMap = XContentMapValues.filter(map, new String[]{"array.nested"}, Strings.EMPTY_ARRAY);
+        assertThat(filteredMap.size(), equalTo(1));
+
+        // Selecting members of objects within arrays (e.g. [1, {nested: "value"}]) always returns all plain
+        // values in the array (the 1 in the example); this is expected, as such mixed arrays are not supported in ES
+        assertThat((Integer) ((List) filteredMap.get("array")).get(0), equalTo(1));
+        assertThat(((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).size(), equalTo(1));
+        assertThat((Integer) ((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).get("nested"), equalTo(2));
+
+        filteredMap = XContentMapValues.filter(map, new String[]{"array.*"}, Strings.EMPTY_ARRAY);
+        assertThat(filteredMap.size(), equalTo(1));
+        assertThat((Integer) ((List) filteredMap.get("array")).get(0), equalTo(1));
+        assertThat(((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).size(), equalTo(2));
+
+        map.clear();
+        map.put("field", "value");
+        map.put("obj",
+                new HashMap<String, Object>() {{
+                    put("field", "value");
+                    put("field2", "value2");
+                }});
+        filteredMap = XContentMapValues.filter(map, new String[]{"obj.field"}, Strings.EMPTY_ARRAY);
+        assertThat(filteredMap.size(), equalTo(1));
+        assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(1));
+        assertThat((String) ((Map<String, Object>) filteredMap.get("obj")).get("field"), equalTo("value"));
+
+        filteredMap = XContentMapValues.filter(map, new String[]{"obj.*"}, Strings.EMPTY_ARRAY);
+        assertThat(filteredMap.size(), equalTo(1));
+        assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(2));
+        assertThat((String) ((Map<String, Object>) filteredMap.get("obj")).get("field"), equalTo("value"));
+        assertThat((String) ((Map<String, Object>) filteredMap.get("obj")).get("field2"), equalTo("value2"));
+    }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void completeObjectFilteringTest() {
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("field", "value");
+ map.put("obj",
+ new HashMap<String, Object>() {{
+ put("field", "value");
+ put("field2", "value2");
+ }});
+ map.put("array",
+ Arrays.asList(
+ 1,
+ new HashMap<String, Object>() {{
+ put("field", "value");
+ put("field2", "value2");
+ }}));
+
+ Map<String, Object> filteredMap = XContentMapValues.filter(map, new String[]{"obj"}, Strings.EMPTY_ARRAY);
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(2));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).get("field").toString(), equalTo("value"));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).get("field2").toString(), equalTo("value2"));
+
+
+ filteredMap = XContentMapValues.filter(map, new String[]{"obj"}, new String[]{"*.field2"});
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).get("field").toString(), equalTo("value"));
+
+
+ filteredMap = XContentMapValues.filter(map, new String[]{"array"}, new String[]{});
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(((List) filteredMap.get("array")).size(), equalTo(2));
+ assertThat((Integer) ((List) filteredMap.get("array")).get(0), equalTo(1));
+ assertThat(((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).size(), equalTo(2));
+
+ filteredMap = XContentMapValues.filter(map, new String[]{"array"}, new String[]{"*.field2"});
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(((List) filteredMap.get("array")).size(), equalTo(2));
+ assertThat((Integer) ((List) filteredMap.get("array")).get(0), equalTo(1));
+ assertThat(((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).size(), equalTo(1));
+ assertThat(((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).get("field").toString(), equalTo("value"));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void filterIncludesUsingStarPrefix() {
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("field", "value");
+ map.put("obj",
+ new HashMap<String, Object>() {{
+ put("field", "value");
+ put("field2", "value2");
+ }});
+ map.put("n_obj",
+ new HashMap<String, Object>() {{
+ put("n_field", "value");
+ put("n_field2", "value2");
+ }});
+
+ Map<String, Object> filteredMap = XContentMapValues.filter(map, new String[]{"*.field2"}, Strings.EMPTY_ARRAY);
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(filteredMap, hasKey("obj"));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")), hasKey("field2"));
+
+ // only objects
+ filteredMap = XContentMapValues.filter(map, new String[]{"*.*"}, Strings.EMPTY_ARRAY);
+ assertThat(filteredMap.size(), equalTo(2));
+ assertThat(filteredMap, hasKey("obj"));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(2));
+ assertThat(filteredMap, hasKey("n_obj"));
+ assertThat(((Map<String, Object>) filteredMap.get("n_obj")).size(), equalTo(2));
+
+
+ filteredMap = XContentMapValues.filter(map, new String[]{"*"}, new String[]{"*.*2"});
+ assertThat(filteredMap.size(), equalTo(3));
+ assertThat(filteredMap, hasKey("field"));
+ assertThat(filteredMap, hasKey("obj"));
+ assertThat(((Map) filteredMap.get("obj")).size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("obj")), hasKey("field"));
+ assertThat(filteredMap, hasKey("n_obj"));
+ assertThat(((Map<String, Object>) filteredMap.get("n_obj")).size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredMap.get("n_obj")), hasKey("n_field"));
+
+ }
+
+ @Test
+ public void filterWithEmptyIncludesExcludes() {
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("field", "value");
+ Map<String, Object> filteredMap = XContentMapValues.filter(map, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY);
+ assertThat(filteredMap.size(), equalTo(1));
+ assertThat(filteredMap.get("field").toString(), equalTo("value"));
+
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Test
+ public void testThatFilterIncludesEmptyObjectWhenUsingIncludes() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("obj")
+ .endObject()
+ .endObject();
+
+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{"obj"}, Strings.EMPTY_ARRAY);
+
+ assertThat(mapTuple.v2(), equalTo(filteredSource));
+ }
+
+ @Test
+ public void testThatFilterIncludesEmptyObjectWhenUsingExcludes() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("obj")
+ .endObject()
+ .endObject();
+
+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{"nonExistingField"});
+
+ assertThat(mapTuple.v2(), equalTo(filteredSource));
+ }
+
+ @Test
+ public void testNotOmittingObjectsWithExcludedProperties() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("obj")
+ .field("f1", "v1")
+ .endObject()
+ .endObject();
+
+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{"obj.f1"});
+
+ assertThat(filteredSource.size(), equalTo(1));
+ assertThat(filteredSource, hasKey("obj"));
+ assertThat(((Map) filteredSource.get("obj")).size(), equalTo(0));
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Test
+ public void testNotOmittingObjectWithNestedExcludedObject() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("obj1")
+ .startObject("obj2")
+ .startObject("obj3")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ // implicit include
+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{"*.obj2"});
+
+ assertThat(filteredSource.size(), equalTo(1));
+ assertThat(filteredSource, hasKey("obj1"));
+ assertThat(((Map) filteredSource.get("obj1")).size(), Matchers.equalTo(0));
+
+ // explicit include
+ filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{"obj1"}, new String[]{"*.obj2"});
+ assertThat(filteredSource.size(), equalTo(1));
+ assertThat(filteredSource, hasKey("obj1"));
+ assertThat(((Map) filteredSource.get("obj1")).size(), Matchers.equalTo(0));
+
+ // wild card include
+ filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{"*.obj2"}, new String[]{"*.obj3"});
+ assertThat(filteredSource.size(), equalTo(1));
+ assertThat(filteredSource, hasKey("obj1"));
+ assertThat(((Map<String, Object>) filteredSource.get("obj1")), hasKey("obj2"));
+ assertThat(((Map) ((Map) filteredSource.get("obj1")).get("obj2")).size(), Matchers.equalTo(0));
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Test
+ public void testIncludingObjectWithNestedIncludedObject() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+ .startObject("obj1")
+ .startObject("obj2")
+ .endObject()
+ .endObject()
+ .endObject();
+
+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{"*.obj2"}, Strings.EMPTY_ARRAY);
+
+ assertThat(filteredSource.size(), equalTo(1));
+ assertThat(filteredSource, hasKey("obj1"));
+ assertThat(((Map) filteredSource.get("obj1")).size(), equalTo(1));
+ assertThat(((Map<String, Object>) filteredSource.get("obj1")), hasKey("obj2"));
+ assertThat(((Map) ((Map) filteredSource.get("obj1")).get("obj2")).size(), equalTo(0));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelTests.java b/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelTests.java
new file mode 100644
index 0000000..689107a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelTests.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.consistencylevel;
+
+import org.elasticsearch.action.UnavailableShardsException;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class WriteConsistencyLevelTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void testWriteConsistencyLevelReplication2() throws Exception {
+ prepareCreate("test", 1, ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 2)).execute().actionGet();
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(1).setWaitForYellowStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ // indexing with consistency level ONE works, since only one active copy (the primary) is required
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test")).setConsistencyLevel(WriteConsistencyLevel.ONE).execute().actionGet();
+ try {
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test"))
+ .setConsistencyLevel(WriteConsistencyLevel.QUORUM)
+ .setTimeout(timeValueMillis(100)).execute().actionGet();
+ fail("can't index, does not match consistency");
+ } catch (UnavailableShardsException e) {
+ // all is well
+ }
+
+ allowNodes("test", 2);
+
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(2).setWaitForYellowStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ // this should work, since we now have two active copies, enough for QUORUM
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test"))
+ .setConsistencyLevel(WriteConsistencyLevel.QUORUM)
+ .setTimeout(timeValueSeconds(1)).execute().actionGet();
+
+ try {
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test"))
+ .setConsistencyLevel(WriteConsistencyLevel.ALL)
+ .setTimeout(timeValueMillis(100)).execute().actionGet();
+ fail("can't index, does not match consistency");
+ } catch (UnavailableShardsException e) {
+ // all is well
+ }
+
+ allowNodes("test", 3);
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(3).setWaitForGreenStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ // this should work, since we now have all three copies active, satisfying ALL
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test"))
+ .setConsistencyLevel(WriteConsistencyLevel.ALL)
+ .setTimeout(timeValueSeconds(1)).execute().actionGet();
+ }
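+
+ // Editor's note (an assumption made explicit, not upstream commentary): with 1 primary
+ // and 2 replicas there are 3 copies of the shard, so QUORUM needs (3 / 2) + 1 = 2
+ // active copies and ALL needs 3, which is why the test raises the node count and
+ // waits for 2 and then 3 active shards before each write is expected to succeed.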
+
+ private String source(String id, String nameValue) {
+ return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/count/query/SimpleQueryTests.java b/src/test/java/org/elasticsearch/count/query/SimpleQueryTests.java
new file mode 100644
index 0000000..ecb1cf7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/count/query/SimpleQueryTests.java
@@ -0,0 +1,827 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.count.query;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.index.query.CommonTermsQueryBuilder.Operator;
+import org.elasticsearch.index.query.MatchQueryBuilder.Type;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleQueryTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void passQueryAsStringTest() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").setRefresh(true).get();
+
+ CountResponse countResponse = client().prepareCount().setSource(new BytesArray("{ \"query\" : { \"term\" : { \"field1\" : \"value1_1\" }}}").array()).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testIndexOptions() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,index_options=docs")
+ .setSettings("index.number_of_shards", 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox").setRefresh(true).get();
+
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.matchQuery("field2", "quick brown").type(Type.PHRASE).slop(0)).get();
+ assertHitCount(countResponse, 1l);
+ try {
+ client().prepareCount().setQuery(QueryBuilders.matchQuery("field1", "quick brown").type(Type.PHRASE).slop(0)).get();
+ } catch (SearchPhaseExecutionException e) {
+ assertTrue("wrong exception message " + e.getMessage(), e.getMessage().endsWith("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery (term=quick)]; }"));
+ }
+ }
+
+ @Test
+ public void testCommonTermsQuery() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,analyzer=whitespace")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "3").setSource("field1", "quick lazy huge brown pidgin", "field2", "the quick lazy huge brown fox jumps over the tree"),
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree") );
+
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the quick brown").cutoffFrequency(3).lowFreqOperator(Operator.OR)).get();
+ assertHitCount(countResponse, 3l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the quick brown").cutoffFrequency(3).lowFreqOperator(Operator.AND)).get();
+ assertHitCount(countResponse, 2l);
+
+ // Default
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the quick brown").cutoffFrequency(3)).get();
+ assertHitCount(countResponse, 3l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the huge fox").lowFreqMinimumShouldMatch("2")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the lazy fox brown").cutoffFrequency(1).highFreqMinimumShouldMatch("3")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the lazy fox brown").cutoffFrequency(1).highFreqMinimumShouldMatch("4")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setSource(new BytesArray("{ \"query\" : { \"common\" : { \"field1\" : { \"query\" : \"the lazy fox brown\", \"cutoff_frequency\" : 1, \"minimum_should_match\" : { \"high_freq\" : 4 } } } } }").array()).get();
+ assertHitCount(countResponse, 1l);
+
+ // Default
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the lazy fox brown").cutoffFrequency(1)).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.commonTerms("field1", "the quick brown").cutoffFrequency(3).analyzer("standard")).get();
+ assertHitCount(countResponse, 3l);
+ // standard drops "the" since it's a stopword
+
+ // try the same with match query
+ countResponse = client().prepareCount().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.OR)).get();
+ assertHitCount(countResponse, 3l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND).analyzer("stop")).get();
+ assertHitCount(countResponse, 3l);
+ // the stop analyzer drops "the" since it's a stopword
+
+ // try the same with multi match query
+ countResponse = client().prepareCount().setQuery(QueryBuilders.multiMatchQuery("the quick brown", "field1", "field2").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(countResponse, 3l);
+ }
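+
+ // Editor's note (illustrative, not upstream commentary): commonTerms splits the query
+ // terms by document frequency at the cutoff; terms more frequent than the cutoff are
+ // demoted to an optional high-frequency clause while the rarer terms drive matching.
+ // lowFreqOperator and high/lowFreqMinimumShouldMatch tune the two sides independently,
+ // which is why tightening either side above lowers the hit count step by step.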
+
+ @Test
+ public void queryStringAnalyzedWildcard() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get();
+ refresh();
+
+ CountResponse countResponse = client().prepareCount().setQuery(queryString("value*").analyzeWildcard(true)).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("*ue*").analyzeWildcard(true)).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("*ue_1").analyzeWildcard(true)).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("val*e_1").analyzeWildcard(true)).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("v?l*e?1").analyzeWildcard(true)).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testLowercaseExpandedTerms() {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get();
+ refresh();
+
+ CountResponse countResponse = client().prepareCount().setQuery(queryString("VALUE_3~1").lowercaseExpandedTerms(true)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount().setQuery(queryString("VALUE_3~1").lowercaseExpandedTerms(false)).get();
+ assertHitCount(countResponse, 0l);
+ countResponse = client().prepareCount().setQuery(queryString("ValUE_*").lowercaseExpandedTerms(true)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount().setQuery(queryString("vAl*E_1")).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount().setQuery(queryString("[VALUE_1 TO VALUE_3]")).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount().setQuery(queryString("[VALUE_1 TO VALUE_3]").lowercaseExpandedTerms(false)).get();
+ assertHitCount(countResponse, 0l);
+ }
+
+ @Test
+ public void testDateRangeInQueryString() {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));
+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1));
+
+ client().prepareIndex("test", "type", "1").setSource("past", aMonthAgo, "future", aMonthFromNow).get();
+ refresh();
+
+ CountResponse countResponse = client().prepareCount().setQuery(queryString("past:[now-2M/d TO now/d]")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("future:[now/d TO now+2M/d]").lowercaseExpandedTerms(false)).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("future:[now/D TO now+2M/d]").lowercaseExpandedTerms(false)).get();
+ // D is an unsupported unit in date math
+ assertThat(countResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(countResponse.getFailedShards(), equalTo(1));
+ assertThat(countResponse.getShardFailures().length, equalTo(1));
+ assertThat(countResponse.getShardFailures()[0].reason(), allOf(containsString("Failed to parse"), containsString("unit [D] not supported for date math")));
+ }
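+
+ // Editor's note (illustrative): date math anchors on a date (here "now") and applies
+ // offsets and rounding left to right, so "now-2M/d" means two months back, rounded
+ // down to the start of that day; units are case sensitive, hence the
+ // "unit [D] not supported" failure asserted above.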
+
+ @Test
+ public void typeFilterTypeIndexedTests() throws Exception {
+ typeFilterTests("not_analyzed");
+ }
+
+ @Test
+ public void typeFilterTypeNotIndexedTests() throws Exception {
+ typeFilterTests("no");
+ }
+
+ private void typeFilterTests(String index) throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_type").field("index", index).endObject()
+ .endObject().endObject())
+ .addMapping("type2", jsonBuilder().startObject().startObject("type2")
+ .startObject("_type").field("index", index).endObject()
+ .endObject().endObject()));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "3").setSource("field1", "value1"));
+
+ assertHitCount(client().prepareCount().setQuery(filteredQuery(matchAllQuery(), typeFilter("type1"))).get(), 2l);
+ assertHitCount(client().prepareCount().setQuery(filteredQuery(matchAllQuery(), typeFilter("type2"))).get(), 3l);
+
+ assertHitCount(client().prepareCount().setTypes("type1").setQuery(matchAllQuery()).get(), 2l);
+ assertHitCount(client().prepareCount().setTypes("type2").setQuery(matchAllQuery()).get(), 3l);
+
+ assertHitCount(client().prepareCount().setTypes("type1", "type2").setQuery(matchAllQuery()).get(), 5l);
+ }
+
+ @Test
+ public void idsFilterTestsIdIndexed() throws Exception {
+ idsFilterTests("not_analyzed");
+ }
+
+ @Test
+ public void idsFilterTestsIdNotIndexed() throws Exception {
+ idsFilterTests("no");
+ }
+
+ private void idsFilterTests(String index) throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_id").field("index", index).endObject()
+ .endObject().endObject()));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3"));
+
+ CountResponse countResponse = client().prepareCount().setQuery(constantScoreQuery(idsFilter("type1").ids("1", "3"))).get();
+ assertHitCount(countResponse, 2l);
+
+ // no type
+ countResponse = client().prepareCount().setQuery(constantScoreQuery(idsFilter().ids("1", "3"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(idsQuery("type1").ids("1", "3")).get();
+ assertHitCount(countResponse, 2l);
+
+ // no type
+ countResponse = client().prepareCount().setQuery(idsQuery().ids("1", "3")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(idsQuery("type1").ids("7", "10")).get();
+ assertHitCount(countResponse, 0l);
+
+ // repeat..., with terms
+ countResponse = client().prepareCount().setTypes("type1").setQuery(constantScoreQuery(termsFilter("_id", "1", "3"))).get();
+ assertHitCount(countResponse, 2l);
+ }
+
+ @Test
+ public void testLimitFilter() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1_2"),
+ client().prepareIndex("test", "type1", "3").setSource("field2", "value2_3"),
+ client().prepareIndex("test", "type1", "4").setSource("field3", "value3_4"));
+
+ CountResponse countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), limitFilter(2))).get();
+ assertHitCount(countResponse, 2l);
+ }
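+
+ // Editor's note (illustrative): the limit filter caps how many documents are collected
+ // per shard, not per index; the count is exactly 2 here only because the index was
+ // created with a single shard above.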
+
+ @Test
+ public void filterExistsMissingTests() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x1", "x_1").field("field1", "value1_1").field("field2", "value2_1").endObject()),
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x2", "x_2").field("field1", "value1_2").endObject()),
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y1", "y_1").field("field2", "value2_3").endObject()),
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y2", "y_2").field("field3", "value3_4").endObject()));
+
+ CountResponse countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsFilter("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(constantScoreQuery(existsFilter("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(queryString("_exists_:field1")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsFilter("field2"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsFilter("field3"))).get();
+ assertHitCount(countResponse, 1l);
+
+ // wildcard check
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsFilter("x*"))).get();
+ assertHitCount(countResponse, 2l);
+
+ // object check
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsFilter("obj1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), missingFilter("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), missingFilter("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(constantScoreQuery(missingFilter("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(queryString("_missing_:field1")).get();
+ assertHitCount(countResponse, 2l);
+
+ // wildcard check
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), missingFilter("x*"))).get();
+ assertHitCount(countResponse, 2l);
+
+ // object check
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), missingFilter("obj1"))).get();
+ assertHitCount(countResponse, 2l);
+ }
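+
+ // Editor's note (an assumption made explicit): exists matches documents with at least
+ // one indexed value for the named field or pattern (object names like "obj1" resolve
+ // to their sub-fields), and missing is its complement, so each exists/missing pair
+ // above accounts for all 4 indexed documents.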
+
+ @Test
+ public void passQueryAsJSONStringTest() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").setRefresh(true).get();
+
+ WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }");
+ assertHitCount(client().prepareCount().setQuery(wrapper).get(), 1l);
+
+ BoolQueryBuilder bool = boolQuery().must(wrapper).must(new TermQueryBuilder("field2", "value2_1"));
+ assertHitCount(client().prepareCount().setQuery(bool).get(), 1l);
+ }
+
+ @Test
+ public void testFiltersWithCustomCacheKey() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ refresh();
+
+ CountResponse countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testMatchQueryNumeric() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("long", 1l, "double", 1.0d).get();
+ client().prepareIndex("test", "type1", "2").setSource("long", 2l, "double", 2.0d).get();
+ client().prepareIndex("test", "type1", "3").setSource("long", 3l, "double", 3.0d).get();
+ refresh();
+ CountResponse countResponse = client().prepareCount().setQuery(matchQuery("long", "1")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(matchQuery("double", "2")).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testMultiMatchQuery() throws Exception {
+
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value4", "field3", "value3").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2", "field2", "value5", "field3", "value2").get();
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3", "field2", "value6", "field3", "value1").get();
+ refresh();
+
+ MultiMatchQueryBuilder builder = QueryBuilders.multiMatchQuery("value1 value2 value4", "field1", "field2");
+ CountResponse countResponse = client().prepareCount().setQuery(builder).get();
+ assertHitCount(countResponse, 2l);
+
+ refresh();
+ builder = QueryBuilders.multiMatchQuery("value1", "field1", "field2")
+ .operator(MatchQueryBuilder.Operator.AND); // Operator only applies on terms inside a field! Fields are always OR-ed together.
+ countResponse = client().prepareCount().setQuery(builder).get();
+ assertHitCount(countResponse, 1l);
+
+ refresh();
+ builder = QueryBuilders.multiMatchQuery("value1", "field1", "field3^1.5")
+ .operator(MatchQueryBuilder.Operator.AND); // Operator only applies on terms inside a field! Fields are always OR-ed together.
+ countResponse = client().prepareCount().setQuery(builder).get();
+ assertHitCount(countResponse, 2l);
+
+ refresh();
+ builder = QueryBuilders.multiMatchQuery("value1").field("field1").field("field3", 1.5f)
+ .operator(MatchQueryBuilder.Operator.AND); // Operator only applies on terms inside a field! Fields are always OR-ed together.
+ countResponse = client().prepareCount().setQuery(builder).get();
+ assertHitCount(countResponse, 2l);
+
+ // Test lenient
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value7", "field2", "value8", "field4", 5).get();
+ refresh();
+
+ builder = QueryBuilders.multiMatchQuery("value1", "field1", "field2", "field4");
+ builder.lenient(true);
+ countResponse = client().prepareCount().setQuery(builder).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testMatchQueryZeroTermsQuery() {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", "field1", "type=string,analyzer=classic", "field2", "type=string,analyzer=classic"));
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2").get();
+ refresh();
+
+ BoolQueryBuilder boolQuery = boolQuery()
+ .must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE))
+ .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE));
+ CountResponse countResponse = client().prepareCount().setQuery(boolQuery).get();
+ assertHitCount(countResponse, 0l);
+
+ boolQuery = boolQuery()
+ .must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL))
+ .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ countResponse = client().prepareCount().setQuery(boolQuery).get();
+ assertHitCount(countResponse, 1l);
+
+ boolQuery = boolQuery().must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ countResponse = client().prepareCount().setQuery(boolQuery).get();
+ assertHitCount(countResponse, 2l);
+ }
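+
+ // Editor's note (illustrative): with the classic analyzer, "a" analyzes to zero
+ // tokens. ZeroTermsQuery.NONE turns such a clause into one that matches nothing, so
+ // the first bool query fails its must clause; ZeroTermsQuery.ALL rewrites it to
+ // match-all, letting the remaining clauses (if any) decide the hit count.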
+
+ @Test
+ public void testMultiMatchQueryZeroTermsQuery() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", "field1", "type=string,analyzer=classic", "field2", "type=string,analyzer=classic"));
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value3", "field2", "value4").get();
+ refresh();
+
+ BoolQueryBuilder boolQuery = boolQuery()
+ .must(multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE))
+ .must(multiMatchQuery("value1", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE)); // Fields are ORed together
+ CountResponse countResponse = client().prepareCount().setQuery(boolQuery).get();
+ assertHitCount(countResponse, 0l);
+
+ boolQuery = boolQuery()
+ .must(multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL))
+ .must(multiMatchQuery("value4", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ countResponse = client().prepareCount().setQuery(boolQuery).get();
+ assertHitCount(countResponse, 1l);
+
+ boolQuery = boolQuery().must(multiMatchQuery("a", "field1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ countResponse = client().prepareCount().setQuery(boolQuery).get();
+ assertHitCount(countResponse, 2l);
+ }
+
+ @Test
+ public void testMultiMatchQueryMinShouldMatch() {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("field1", new String[]{"value1", "value2", "value3"}).get();
+ client().prepareIndex("test", "type1", "2").setSource("field2", "value1").get();
+ refresh();
+
+ MultiMatchQueryBuilder multiMatchQuery = multiMatchQuery("value1 value2 foo", "field1", "field2");
+
+ multiMatchQuery.useDisMax(true);
+ multiMatchQuery.minimumShouldMatch("70%");
+ CountResponse countResponse = client().prepareCount().setQuery(multiMatchQuery).get();
+ assertHitCount(countResponse, 1l);
+
+ multiMatchQuery.minimumShouldMatch("30%");
+ countResponse = client().prepareCount().setQuery(multiMatchQuery).get();
+ assertHitCount(countResponse, 2l);
+
+ multiMatchQuery.useDisMax(false);
+ multiMatchQuery.minimumShouldMatch("70%");
+ countResponse = client().prepareCount().setQuery(multiMatchQuery).get();
+ assertHitCount(countResponse, 1l);
+
+ multiMatchQuery.minimumShouldMatch("30%");
+ countResponse = client().prepareCount().setQuery(multiMatchQuery).get();
+ assertHitCount(countResponse, 2l);
+
+ multiMatchQuery = multiMatchQuery("value1 value2 bar", "field1");
+ multiMatchQuery.minimumShouldMatch("100%");
+ countResponse = client().prepareCount().setQuery(multiMatchQuery).get();
+ assertHitCount(countResponse, 0l);
+
+ multiMatchQuery.minimumShouldMatch("70%");
+ countResponse = client().prepareCount().setQuery(multiMatchQuery).get();
+ assertHitCount(countResponse, 1l);
+ }
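+
+ // Editor's note (an assumption about minimum_should_match semantics): percentage
+ // values resolve against the number of optional term clauses and round down, so with
+ // the three terms above "70%" requires two matching terms while "30%" effectively
+ // requires one, in line with the 1-hit and 2-hit counts asserted.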
+
+ @Test
+ public void testFuzzyQueryString() {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get();
+ client().prepareIndex("test", "type1", "2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get();
+ refresh();
+
+ CountResponse countResponse = client().prepareCount().setQuery(queryString("str:kimcy~1")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("num:11~1")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("date:2012-02-02~1d")).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testSpecialRangeSyntaxInQueryString() {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get();
+ client().prepareIndex("test", "type1", "2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get();
+ refresh();
+
+ CountResponse countResponse = client().prepareCount().setQuery(queryString("num:>19")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("num:>20")).get();
+ assertHitCount(countResponse, 0l);
+
+ countResponse = client().prepareCount().setQuery(queryString("num:>=20")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("num:>11")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(queryString("num:<20")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(queryString("num:<=20")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(queryString("+num:>11 +num:<20")).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testEmptyTermsFilter() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type", "terms", "type=string"));
+ ensureGreen();
+ client().prepareIndex("test", "type", "1").setSource("term", "1").get();
+ client().prepareIndex("test", "type", "2").setSource("term", "2").get();
+ client().prepareIndex("test", "type", "3").setSource("term", "3").get();
+ client().prepareIndex("test", "type", "4").setSource("term", "4").get();
+ refresh();
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("term", new String[0]))).get();
+ assertHitCount(countResponse, 0l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), idsFilter())).get();
+ assertHitCount(countResponse, 0l);
+ }
+
+ @Test
+ public void testTermsLookupFilter() throws Exception {
+ assertAcked(prepareCreate("lookup").addMapping("type", "terms", "type=string", "other", "type=string"));
+ assertAcked(prepareCreate("lookup2").addMapping("type",
+ jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("arr").startObject("properties").startObject("term").field("type", "string")
+ .endObject().endObject().endObject().endObject().endObject().endObject()));
+ assertAcked(prepareCreate("test").addMapping("type", "term", "type=string"));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("lookup", "type", "1").setSource("terms", new String[]{"1", "3"}),
+ client().prepareIndex("lookup", "type", "2").setSource("terms", new String[]{"2"}),
+ client().prepareIndex("lookup", "type", "3").setSource("terms", new String[]{"2", "4"}),
+ client().prepareIndex("lookup", "type", "4").setSource("other", "value"),
+ client().prepareIndex("lookup2", "type", "1").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "1").endObject()
+ .startObject().field("term", "3").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("lookup2", "type", "2").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "2").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("lookup2", "type", "3").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "2").endObject()
+ .startObject().field("term", "4").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("test", "type", "1").setSource("term", "1"),
+ client().prepareIndex("test", "type", "2").setSource("term", "2"),
+ client().prepareIndex("test", "type", "3").setSource("term", "3"),
+ client().prepareIndex("test", "type", "4").setSource("term", "4"));
+
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))).get();
+ assertHitCount(countResponse, 2l);
+
+ // same as above, just on the _id...
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("_id").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))).get();
+ assertHitCount(countResponse, 2l);
+
+ // another search with same parameters...
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("2").lookupPath("terms"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("3").lookupPath("terms"))
+ ).get();
+ assertNoFailures(countResponse);
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("4").lookupPath("terms"))).get();
+ assertHitCount(countResponse, 0l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup2").lookupType("type").lookupId("1").lookupPath("arr.term"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup2").lookupType("type").lookupId("2").lookupPath("arr.term"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup2").lookupType("type").lookupId("3").lookupPath("arr.term"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("not_exists").lookupIndex("lookup2").lookupType("type").lookupId("3").lookupPath("arr.term"))).get();
+ assertHitCount(countResponse, 0l);
+ }
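+
+ // Editor's sketch (illustrative JSON mirroring the builders above): a terms lookup
+ // filter fetches the referenced document and reads the values at "path" as its term
+ // list, so for the data indexed here
+ // { "terms": { "term": { "index": "lookup", "type": "type", "id": "1", "path": "terms" } } }
+ // behaves like { "terms": { "term": ["1", "3"] } }.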
+
+ @Test
+ public void testBasicFilterById() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get();
+ refresh();
+
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.idsFilter("type1", "type2").ids("1", "2"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.idsFilter().ids("1", "2"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.idsFilter("type1").ids("1", "2"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.idsFilter().ids("1"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.idsFilter(null).ids("1"))).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.idsFilter("type1", "type2", "type3").ids("1", "2", "3", "4"))).get();
+ assertHitCount(countResponse, 2l);
+ }
+
+ @Test
+ public void testBasicQueryById() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get();
+ refresh();
+
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery("type1", "type2").ids("1", "2")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery().ids("1")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery().ids("1", "2")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery("type1").ids("1", "2")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery().ids("1")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery(null).ids("1")).get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.idsQuery("type1", "type2", "type3").ids("1", "2", "3", "4")).get();
+ assertHitCount(countResponse, 2l);
+ }
+
+ @Test
+ public void testNumericTermsAndRanges() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1",
+ "num_byte", "type=byte", "num_short", "type=short",
+ "num_integer", "type=integer", "num_long", "type=long",
+ "num_float", "type=float", "num_double", "type=double"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("num_byte", 1, "num_short", 1, "num_integer", 1,
+ "num_long", 1, "num_float", 1, "num_double", 1).get();
+
+ client().prepareIndex("test", "type1", "2").setSource("num_byte", 2, "num_short", 2, "num_integer", 2,
+ "num_long", 2, "num_float", 2, "num_double", 2).get();
+
+ client().prepareIndex("test", "type1", "17").setSource("num_byte", 17, "num_short", 17, "num_integer", 17,
+ "num_long", 17, "num_float", 17, "num_double", 17).get();
+ refresh();
+
+ CountResponse countResponse;
+ logger.info("--> term query on 1");
+ countResponse = client().prepareCount("test").setQuery(termQuery("num_byte", 1)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termQuery("num_short", 1)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termQuery("num_integer", 1)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termQuery("num_long", 1)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termQuery("num_float", 1)).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termQuery("num_double", 1)).get();
+ assertHitCount(countResponse, 1l);
+
+ logger.info("--> terms query on 1");
+ countResponse = client().prepareCount("test").setQuery(termsQuery("num_byte", new int[]{1})).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termsQuery("num_short", new int[]{1})).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termsQuery("num_integer", new int[]{1})).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termsQuery("num_long", new int[]{1})).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termsQuery("num_float", new double[]{1})).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(termsQuery("num_double", new double[]{1})).get();
+ assertHitCount(countResponse, 1l);
+
+ logger.info("--> term filter on 1");
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_byte", 1))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_short", 1))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_integer", 1))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_long", 1))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_float", 1))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_double", 1))).get();
+ assertHitCount(countResponse, 1l);
+
+ logger.info("--> terms filter on 1");
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_byte", new int[]{1}))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_short", new int[]{1}))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_integer", new int[]{1}))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_long", new int[]{1}))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_float", new int[]{1}))).get();
+ assertHitCount(countResponse, 1l);
+ countResponse = client().prepareCount("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_double", new int[]{1}))).get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test // see #2994
+ public void testSimpleSpan() throws ElasticsearchException, IOException {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar").get();
+ client().prepareIndex("test", "test", "2").setSource("description", "foo other anything").get();
+ client().prepareIndex("test", "test", "3").setSource("description", "foo other").get();
+ client().prepareIndex("test", "test", "4").setSource("description", "foo").get();
+ refresh();
+
+ CountResponse response = client().prepareCount("test")
+ .setQuery(QueryBuilders.spanOrQuery().clause(QueryBuilders.spanTermQuery("description", "bar"))).get();
+ assertHitCount(response, 1l);
+ response = client().prepareCount("test")
+ .setQuery(QueryBuilders.spanOrQuery().clause(QueryBuilders.spanTermQuery("test.description", "bar"))).get();
+ assertHitCount(response, 1l);
+
+ response = client().prepareCount("test").setQuery(
+ QueryBuilders.spanNearQuery()
+ .clause(QueryBuilders.spanTermQuery("description", "foo"))
+ .clause(QueryBuilders.spanTermQuery("test.description", "other"))
+ .slop(3)).get();
+ assertHitCount(response, 3l);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java b/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java
new file mode 100644
index 0000000..0c3f18b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.count.simple;
+
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+public class SimpleCountTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testCountRandomPreference() throws InterruptedException, ExecutionException {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", between(1, 3))).get();
+ indexRandom(true, client().prepareIndex("test", "type", "1").setSource("field", "value"),
+ client().prepareIndex("test", "type", "2").setSource("field", "value"),
+ client().prepareIndex("test", "type", "3").setSource("field", "value"),
+ client().prepareIndex("test", "type", "4").setSource("field", "value"),
+ client().prepareIndex("test", "type", "5").setSource("field", "value"),
+ client().prepareIndex("test", "type", "6").setSource("field", "value"));
+
+ int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+ // count with a random preference (even an empty or unicode string) must still reach a copy of every shard
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).setPreference(randomUnicodeOfLengthBetween(0, 4)).get();
+ assertHitCount(countResponse, 6l);
+ }
+ }
+
+ @Test
+ public void simpleIpTests() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("from").field("type", "ip").endObject()
+ .startObject("to").field("type", "ip").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefresh(true).execute().actionGet();
+
+ CountResponse countResponse = client().prepareCount()
+ .setQuery(boolQuery().must(rangeQuery("from").lt("192.168.0.7")).must(rangeQuery("to").gt("192.168.0.7")))
+ .execute().actionGet();
+
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void simpleIdTests() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+
+ client().prepareIndex("test", "type", "XXX1").setSource("field", "value").setRefresh(true).execute().actionGet();
+ // _id is not indexed, but let's check that a term query on it is converted automatically (it resolves through _uid)
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.queryString("_id:XXX1")).execute().actionGet();
+ assertHitCount(countResponse, 1l);
+
+ // _id is not indexed, but prefix queries on it are supported automatically as well
+ countResponse = client().prepareCount().setQuery(QueryBuilders.prefixQuery("_id", "XXX")).execute().actionGet();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.queryString("_id:XXX*").lowercaseExpandedTerms(false)).execute().actionGet();
+ assertHitCount(countResponse, 1l);
+ }
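+
+ // Editorial sketch, not from the upstream source: the conversions above work because every
+ // document is indexed under a single _uid term of the form "type#id". Assuming the same
+ // QueryBuilders API used in this class, the explicit equivalent would be:
+ //
+ // countResponse = client().prepareCount().setQuery(QueryBuilders.termQuery("_uid", "type#XXX1")).execute().actionGet();
+ // assertHitCount(countResponse, 1l);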
+
+ @Test
+ public void simpleDateMathTests() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()).execute().actionGet();
+ client().prepareIndex("test", "type1", "1").setSource("field", "2010-01-05T02:00").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "2010-01-06T02:00").execute().actionGet();
+ ensureGreen();
+ refresh();
+ CountResponse countResponse = client().prepareCount("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d")).execute().actionGet();
+ assertNoFailures(countResponse);
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount("test").setQuery(QueryBuilders.queryString("field:[2010-01-03||+2d TO 2010-01-04||+2d]")).execute().actionGet();
+ assertHitCount(countResponse, 2l);
+ }
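+
+ // Editorial note, not from the upstream source: in date math the part before "||" is the
+ // anchor date and everything after it is applied to that anchor, e.g. "+2d" adds two days
+ // and "/d" rounds down to the day; "now" is used on its own, without the separator. A
+ // hedged sketch against the same rangeQuery API as above:
+ //
+ // client().prepareCount("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-04||/d").lte("now")).execute().actionGet();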
+
+ @Test
+ public void localeDependentDateTests() throws Exception {
+ prepareCreate("test")
+ .addMapping("type1",
+ jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("date_field")
+ .field("type", "date")
+ .field("format", "E, d MMM yyyy HH:mm:ss Z")
+ .field("locale", "de")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+ ensureGreen();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", "" + i).setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800").execute().actionGet();
+ client().prepareIndex("test", "type1", "" + (10 + i)).setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800").execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Do, 07 Dez 2000 00:00:00 -0800"))
+ .execute().actionGet();
+ assertHitCount(countResponse, 10l);
+
+
+ countResponse = client().prepareCount("test")
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Fr, 08 Dez 2000 00:00:00 -0800"))
+ .execute().actionGet();
+ assertHitCount(countResponse, 20l);
+
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java b/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java
new file mode 100644
index 0000000..ef394bb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deleteByQuery;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+public class DeleteByQueryTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testDeleteAllNoIndices() {
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ DeleteByQueryRequestBuilder deleteByQueryRequestBuilder = client().prepareDeleteByQuery();
+ deleteByQueryRequestBuilder.setQuery(QueryBuilders.matchAllQuery());
+ deleteByQueryRequestBuilder.setIndicesOptions(IndicesOptions.fromOptions(false, true, true, false));
+ DeleteByQueryResponse actionGet = deleteByQueryRequestBuilder.execute().actionGet();
+ assertThat(actionGet.getIndices().size(), equalTo(0));
+ }
+
+ @Test
+ public void testDeleteAllOneIndex() {
+
+ String json = "{" + "\"user\":\"kimchy\"," + "\"postDate\":\"2013-01-30\"," + "\"message\":\"trying out Elastic Search\"" + "}";
+
+ client().prepareIndex("twitter", "tweet").setSource(json).setRefresh(true).execute().actionGet();
+
+ SearchResponse search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ assertThat(search.getHits().totalHits(), equalTo(1l));
+ DeleteByQueryRequestBuilder deleteByQueryRequestBuilder = client().prepareDeleteByQuery();
+ deleteByQueryRequestBuilder.setQuery(QueryBuilders.matchAllQuery());
+
+ DeleteByQueryResponse actionGet = deleteByQueryRequestBuilder.execute().actionGet();
+ assertThat(actionGet.status(), equalTo(RestStatus.OK));
+ assertThat(actionGet.getIndex("twitter"), notNullValue());
+ assertThat(actionGet.getIndex("twitter").getFailedShards(), equalTo(0));
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ assertThat(search.getHits().totalHits(), equalTo(0l));
+ }
+
+ @Test
+ public void testMissing() {
+
+ String json = "{" + "\"user\":\"kimchy\"," + "\"postDate\":\"2013-01-30\"," + "\"message\":\"trying out Elastic Search\"" + "}";
+
+ client().prepareIndex("twitter", "tweet").setSource(json).setRefresh(true).execute().actionGet();
+
+ SearchResponse search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ assertThat(search.getHits().totalHits(), equalTo(1l));
+ DeleteByQueryRequestBuilder deleteByQueryRequestBuilder = client().prepareDeleteByQuery();
+ deleteByQueryRequestBuilder.setIndices("twitter", "missing");
+ deleteByQueryRequestBuilder.setQuery(QueryBuilders.matchAllQuery());
+
+ try {
+ deleteByQueryRequestBuilder.execute().actionGet();
+ fail("Exception should have been thrown.");
+ } catch (IndexMissingException e) {
+ // expected: the "missing" index does not exist
+ }
+
+ deleteByQueryRequestBuilder.setIndicesOptions(IndicesOptions.lenient());
+ DeleteByQueryResponse actionGet = deleteByQueryRequestBuilder.execute().actionGet();
+ assertThat(actionGet.status(), equalTo(RestStatus.OK));
+ assertThat(actionGet.getIndex("twitter").getFailedShards(), equalTo(0));
+ assertThat(actionGet.getIndex("twitter"), notNullValue());
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ assertThat(search.getHits().totalHits(), equalTo(0l));
+ }
+
+ @Test
+ public void testFailure() throws Exception {
+ client().admin().indices().prepareCreate("twitter").execute().actionGet();
+
+ DeleteByQueryResponse response = client().prepareDeleteByQuery("twitter")
+ .setQuery(QueryBuilders.hasChildQuery("type", QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+
+ assertThat(response.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(response.getIndex("twitter").getSuccessfulShards(), equalTo(0));
+ assertThat(response.getIndex("twitter").getFailedShards(), equalTo(5));
+ assertThat(response.getIndices().size(), equalTo(1));
+ assertThat(response.getIndices().get("twitter").getFailedShards(), equalTo(5));
+ assertThat(response.getIndices().get("twitter").getFailures().length, equalTo(5));
+ for (ShardOperationFailedException failure : response.getIndices().get("twitter").getFailures()) {
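+ // the doubled "for" below matches the server's error message verbatim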
+ assertThat(failure.reason(), containsString("[twitter] [has_child] No mapping for for type [type]"));
+ assertThat(failure.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(failure.shardId(), greaterThan(-1));
+ }
+ }
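+
+ // Editorial sketch, not from the upstream source: the hard-coded 5 above relies on the
+ // default index.number_of_shards. Making the expectation explicit would look like this,
+ // assuming the settings builder used elsewhere in this test suite:
+ //
+ // client().admin().indices().prepareCreate("twitter")
+ // .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 5))
+ // .execute().actionGet();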
+
+ @Test
+ public void testDeleteByFieldQuery() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ int numDocs = atLeast(10);
+ for (int i = 0; i < numDocs; i++) {
+ client().prepareIndex("test", "test", Integer.toString(i))
+ .setRouting(randomAsciiOfLengthBetween(1, 5))
+ .setSource("foo", "bar").get();
+ }
+ refresh();
+ assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchQuery("_id", Integer.toString(between(0, numDocs - 1)))).get(), 1);
+ assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).get(), numDocs);
+ client().prepareDeleteByQuery("test")
+ .setQuery(QueryBuilders.matchQuery("_id", Integer.toString(between(0, numDocs - 1))))
+ .execute().actionGet();
+ refresh();
+ assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).get(), numDocs - 1);
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java b/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java
new file mode 100644
index 0000000..2ba71a4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deps.jackson;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class JacksonLocationTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testLocationExtraction() throws IOException {
+ // {
+ // "index" : "test",
+ // "source" : {
+ // value : "something"
+ // }
+ // }
+ BytesStreamOutput os = new BytesStreamOutput();
+ JsonGenerator gen = new JsonFactory().createGenerator(os);
+ gen.writeStartObject();
+
+ gen.writeStringField("index", "test");
+
+ gen.writeFieldName("source");
+ gen.writeStartObject();
+ gen.writeStringField("value", "something");
+ gen.writeEndObject();
+
+ gen.writeEndObject();
+
+ gen.close();
+
+ byte[] data = os.bytes().toBytes();
+ JsonParser parser = new JsonFactory().createParser(data);
+
+ assertThat(parser.nextToken(), equalTo(JsonToken.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(JsonToken.FIELD_NAME)); // "index"
+ assertThat(parser.nextToken(), equalTo(JsonToken.VALUE_STRING));
+ assertThat(parser.nextToken(), equalTo(JsonToken.FIELD_NAME)); // "source"
+// JsonLocation location1 = parser.getCurrentLocation();
+// parser.skipChildren();
+// JsonLocation location2 = parser.getCurrentLocation();
+//
+// byte[] sourceData = new byte[(int) (location2.getByteOffset() - location1.getByteOffset())];
+// System.arraycopy(data, (int) location1.getByteOffset(), sourceData, 0, sourceData.length);
+//
+// JsonParser sourceParser = new JsonFactory().createJsonParser(new FastByteArrayInputStream(sourceData));
+// assertThat(sourceParser.nextToken(), equalTo(JsonToken.START_OBJECT));
+// assertThat(sourceParser.nextToken(), equalTo(JsonToken.FIELD_NAME)); // "value"
+// assertThat(sourceParser.nextToken(), equalTo(JsonToken.VALUE_STRING));
+// assertThat(sourceParser.nextToken(), equalTo(JsonToken.END_OBJECT));
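+
+ // Editorial note, not from the upstream source: the commented-out block above sketches how
+ // the raw bytes of the "source" object could be sliced out without re-serializing it:
+ // record JsonLocation.getByteOffset() before and after parser.skipChildren() and copy that
+ // byte range out of the original buffer. The live assertions stop at the "source" field
+ // name, which is exactly where such slicing would begin.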
+ }
+}
diff --git a/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java b/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java
new file mode 100644
index 0000000..c306b55
--- /dev/null
+++ b/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deps.joda;
+
+import org.elasticsearch.common.joda.FormatDateTimeFormatter;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.joda.time.DateTimeZone;
+import org.joda.time.MutableDateTime;
+import org.joda.time.format.*;
+import org.junit.Test;
+
+import java.util.Date;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleJodaTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testMultiParsers() {
+ DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder();
+ DateTimeParser[] parsers = new DateTimeParser[3];
+ parsers[0] = DateTimeFormat.forPattern("MM/dd/yyyy").withZone(DateTimeZone.UTC).getParser();
+ parsers[1] = DateTimeFormat.forPattern("MM-dd-yyyy").withZone(DateTimeZone.UTC).getParser();
+ parsers[2] = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss").withZone(DateTimeZone.UTC).getParser();
+ builder.append(DateTimeFormat.forPattern("MM/dd/yyyy").withZone(DateTimeZone.UTC).getPrinter(), parsers);
+
+ DateTimeFormatter formatter = builder.toFormatter();
+
+ formatter.parseMillis("2009-11-15 14:12:12");
+ }
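+
+ // Editorial note, not from the upstream source: DateTimeFormatterBuilder.append(printer, parsers)
+ // installs one printer and tries the parsers in order until one matches, which is why the
+ // "yyyy-MM-dd HH:mm:ss" input parses although the printer pattern is "MM/dd/yyyy". Printing
+ // epoch millis with the same formatter, as a sketch:
+ //
+ // String printed = builder.toFormatter().withZone(DateTimeZone.UTC).print(0L); // "01/01/1970"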
+
+ @Test
+ public void testIsoDateFormatDateTimeNoMillisUTC() {
+ DateTimeFormatter formatter = ISODateTimeFormat.dateTimeNoMillis().withZone(DateTimeZone.UTC);
+ long millis = formatter.parseMillis("1970-01-01T00:00:00Z");
+
+ assertThat(millis, equalTo(0l));
+ }
+
+ @Test
+ public void testUpperBound() {
+ MutableDateTime dateTime = new MutableDateTime(3000, 12, 31, 23, 59, 59, 999, DateTimeZone.UTC);
+ DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
+
+ String value = "2000-01-01";
+ int i = formatter.parseInto(dateTime, value, 0);
+ assertThat(i, equalTo(value.length()));
+ assertThat(dateTime.toString(), equalTo("2000-01-01T23:59:59.999Z"));
+ }
+
+ @Test
+ public void testIsoDateFormatDateOptionalTimeUTC() {
+ DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
+ long millis = formatter.parseMillis("1970-01-01T00:00:00Z");
+ assertThat(millis, equalTo(0l));
+ millis = formatter.parseMillis("1970-01-01T00:00:00.001Z");
+ assertThat(millis, equalTo(1l));
+ millis = formatter.parseMillis("1970-01-01T00:00:00.1Z");
+ assertThat(millis, equalTo(100l));
+ millis = formatter.parseMillis("1970-01-01T00:00:00.1");
+ assertThat(millis, equalTo(100l));
+ millis = formatter.parseMillis("1970-01-01T00:00:00");
+ assertThat(millis, equalTo(0l));
+ millis = formatter.parseMillis("1970-01-01");
+ assertThat(millis, equalTo(0l));
+
+ millis = formatter.parseMillis("1970");
+ assertThat(millis, equalTo(0l));
+
+ try {
+ formatter.parseMillis("1970 kuku");
+ fail("formatting should fail");
+ } catch (IllegalArgumentException e) {
+ // all is well
+ }
+
+ // test offset in format
+ millis = formatter.parseMillis("1970-01-01T00:00:00-02:00");
+ assertThat(millis, equalTo(TimeValue.timeValueHours(2).millis()));
+ }
+
+ @Test
+ public void testIsoVsCustom() {
+ DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
+ long millis = formatter.parseMillis("1970-01-01T00:00:00");
+ assertThat(millis, equalTo(0l));
+
+ formatter = DateTimeFormat.forPattern("yyyy/MM/dd HH:mm:ss").withZone(DateTimeZone.UTC);
+ millis = formatter.parseMillis("1970/01/01 00:00:00");
+ assertThat(millis, equalTo(0l));
+
+ FormatDateTimeFormatter formatter2 = Joda.forPattern("yyyy/MM/dd HH:mm:ss");
+ millis = formatter2.parser().parseMillis("1970/01/01 00:00:00");
+ assertThat(millis, equalTo(0l));
+ }
+
+ @Test
+ public void testWriteAndParse() {
+ DateTimeFormatter dateTimeWriter = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC);
+ DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
+ Date date = new Date();
+ assertThat(formatter.parseMillis(dateTimeWriter.print(date.getTime())), equalTo(date.getTime()));
+ }
+
+ @Test
+ public void testSlashInFormat() {
+ FormatDateTimeFormatter formatter = Joda.forPattern("MM/yyyy");
+ formatter.parser().parseMillis("01/2001");
+
+ formatter = Joda.forPattern("yyyy/MM/dd HH:mm:ss");
+ long millis = formatter.parser().parseMillis("1970/01/01 00:00:00");
+ formatter.printer().print(millis);
+
+ try {
+ millis = formatter.parser().parseMillis("1970/01/01");
+ fail();
+ } catch (IllegalArgumentException e) {
+ // it really can't parse this one
+ }
+ }
+
+ @Test
+ public void testMultipleFormats() {
+ FormatDateTimeFormatter formatter = Joda.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd");
+ long millis = formatter.parser().parseMillis("1970/01/01 00:00:00");
+ assertThat("1970/01/01 00:00:00", is(formatter.printer().print(millis)));
+ }
+
+ @Test
+ public void testMultipleDifferentFormats() {
+ FormatDateTimeFormatter formatter = Joda.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd");
+ String input = "1970/01/01 00:00:00";
+ long millis = formatter.parser().parseMillis(input);
+ assertThat(input, is(formatter.printer().print(millis)));
+
+ Joda.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||dateOptionalTime");
+ Joda.forPattern("dateOptionalTime||yyyy/MM/dd HH:mm:ss||yyyy/MM/dd");
+ Joda.forPattern("yyyy/MM/dd HH:mm:ss||dateOptionalTime||yyyy/MM/dd");
+ Joda.forPattern("date_time||date_time_no_millis");
+ Joda.forPattern(" date_time || date_time_no_millis");
+ }
+
+ @Test
+ public void testInvalidPatterns() {
+ expectInvalidPattern("does_not_exist_pattern", "Invalid format: [does_not_exist_pattern]: Illegal pattern component: o");
+ expectInvalidPattern("OOOOO", "Invalid format: [OOOOO]: Illegal pattern component: OOOOO");
+ expectInvalidPattern(null, "No date pattern provided");
+ expectInvalidPattern("", "No date pattern provided");
+ expectInvalidPattern(" ", "No date pattern provided");
+ expectInvalidPattern("||date_time_no_millis", "No date pattern provided");
+ expectInvalidPattern("date_time_no_millis||", "No date pattern provided");
+ }
+
+ private void expectInvalidPattern(String pattern, String errorMessage) {
+ try {
+ Joda.forPattern(pattern);
+ fail("Pattern " + pattern + " should have thrown an exception but did not");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString(errorMessage));
+ }
+ }
+
+ @Test
+ public void testRounding() {
+ long TIME = utcTimeInMillis("2009-02-03T01:01:01");
+ MutableDateTime time = new MutableDateTime(DateTimeZone.UTC);
+ time.setMillis(TIME);
+ assertThat(time.monthOfYear().roundFloor().toString(), equalTo("2009-02-01T00:00:00.000Z"));
+ time.setMillis(TIME);
+ assertThat(time.hourOfDay().roundFloor().toString(), equalTo("2009-02-03T01:00:00.000Z"));
+ time.setMillis(TIME);
+ assertThat(time.dayOfMonth().roundFloor().toString(), equalTo("2009-02-03T00:00:00.000Z"));
+ }
+
+ @Test
+ public void testRoundingSetOnTime() {
+ MutableDateTime time = new MutableDateTime(DateTimeZone.UTC);
+ time.setRounding(time.getChronology().monthOfYear(), MutableDateTime.ROUND_FLOOR);
+ time.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));
+ assertThat(time.toString(), equalTo("2009-02-01T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-01T00:00:00.000Z")));
+
+ time.setMillis(utcTimeInMillis("2009-05-03T01:01:01"));
+ assertThat(time.toString(), equalTo("2009-05-01T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-05-01T00:00:00.000Z")));
+
+ time = new MutableDateTime(DateTimeZone.UTC);
+ time.setRounding(time.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR);
+ time.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));
+ assertThat(time.toString(), equalTo("2009-02-03T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-03T00:00:00.000Z")));
+
+ time.setMillis(utcTimeInMillis("2009-02-02T23:01:01"));
+ assertThat(time.toString(), equalTo("2009-02-02T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-02T00:00:00.000Z")));
+
+ time = new MutableDateTime(DateTimeZone.UTC);
+ time.setRounding(time.getChronology().weekOfWeekyear(), MutableDateTime.ROUND_FLOOR);
+ time.setMillis(utcTimeInMillis("2011-05-05T01:01:01"));
+ assertThat(time.toString(), equalTo("2011-05-02T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2011-05-02T00:00:00.000Z")));
+ }
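+
+ // Editorial note, not from the upstream source: once setRounding(field, ROUND_FLOOR) is set,
+ // every subsequent setMillis() is floored to the start of that field's interval, so one
+ // MutableDateTime instance can bucket many timestamps. Minimal sketch (someTimestamp is a
+ // placeholder):
+ //
+ // MutableDateTime bucket = new MutableDateTime(DateTimeZone.UTC);
+ // bucket.setRounding(bucket.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR);
+ // bucket.setMillis(someTimestamp); // lands on 00:00:00.000 of that day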
+
+ @Test
+ public void testRoundingWithTimeZone() {
+ MutableDateTime time = new MutableDateTime(DateTimeZone.UTC);
+ time.setZone(DateTimeZone.forOffsetHours(-2));
+ time.setRounding(time.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR);
+
+ MutableDateTime utcTime = new MutableDateTime(DateTimeZone.UTC);
+ utcTime.setRounding(utcTime.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR);
+
+ time.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));
+ utcTime.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));
+
+ assertThat(time.toString(), equalTo("2009-02-02T00:00:00.000-02:00"));
+ assertThat(utcTime.toString(), equalTo("2009-02-03T00:00:00.000Z"));
+ // the time is on the 2nd, and utcTime is on the 3rd, but, because time already encapsulates
+ // time zone, the millis diff is not 24, but 22 hours
+ assertThat(time.getMillis(), equalTo(utcTime.getMillis() - TimeValue.timeValueHours(22).millis()));
+
+ time.setMillis(utcTimeInMillis("2009-02-04T01:01:01"));
+ utcTime.setMillis(utcTimeInMillis("2009-02-04T01:01:01"));
+ assertThat(time.toString(), equalTo("2009-02-03T00:00:00.000-02:00"));
+ assertThat(utcTime.toString(), equalTo("2009-02-04T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTime.getMillis() - TimeValue.timeValueHours(22).millis()));
+ }
+
+ private long utcTimeInMillis(String time) {
+ return ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC).parseMillis(time);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java b/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java
new file mode 100644
index 0000000..e8fa166
--- /dev/null
+++ b/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deps.lucene;
+
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleLuceneTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSortValues() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+ for (int i = 0; i < 10; i++) {
+ Document document = new Document();
+ document.add(new TextField("str", new String(new char[]{(char) (97 + i), (char) (97 + i)}), Field.Store.YES));
+ indexWriter.addDocument(document);
+ }
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), null, 10, new Sort(new SortField("str", SortField.Type.STRING)));
+ for (int i = 0; i < 10; i++) {
+ FieldDoc fieldDoc = (FieldDoc) docs.scoreDocs[i];
+ assertThat((BytesRef) fieldDoc.fields[0], equalTo(new BytesRef(new String(new char[]{(char) (97 + i), (char) (97 + i)}))));
+ }
+ }
+
+ @Test
+ public void testAddDocAfterPrepareCommit() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ indexWriter.addDocument(document);
+ DirectoryReader reader = DirectoryReader.open(indexWriter, true);
+ assertThat(reader.numDocs(), equalTo(1));
+
+ indexWriter.prepareCommit();
+ // returns null because nothing has changed since the reader was opened
+ assertThat(DirectoryReader.openIfChanged(reader), equalTo(null));
+
+ document = new Document();
+ document.add(new TextField("_id", "2", Field.Store.YES));
+ indexWriter.addDocument(document);
+ indexWriter.commit();
+ reader = DirectoryReader.openIfChanged(reader);
+ assertThat(reader.numDocs(), equalTo(2));
+ }
+
+ @Test
+ public void testSimpleNumericOps() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new IntField("test", 2, IntField.TYPE_STORED));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+ Document doc = searcher.doc(topDocs.scoreDocs[0].doc);
+ IndexableField f = doc.getField("test");
+ assertThat(f.stringValue(), equalTo("2"));
+
+ BytesRef bytes = new BytesRef();
+ NumericUtils.intToPrefixCoded(2, 0, bytes);
+ topDocs = searcher.search(new TermQuery(new Term("test", bytes)), 1);
+ doc = searcher.doc(topDocs.scoreDocs[0].doc);
+ f = doc.getField("test");
+ assertThat(f.stringValue(), equalTo("2"));
+
+ indexWriter.close();
+ }
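+
+ // Editorial note, not from the upstream source: numeric fields are indexed as prefix-coded
+ // terms, so an exact numeric lookup has to encode the value the same way. With shift 0,
+ // NumericUtils.intToPrefixCoded produces the full-precision term that IntField indexed,
+ // which is why the TermQuery above finds the document.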
+
+ /**
+ * Verifies that stored fields come back in the order in which they were added to the
+ * document, not in lexicographic order of the field names. Heavily accessed fields that
+ * use a field selector should therefore be added first (with load and break).
+ */
+ @Test
+ public void testOrdering() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new TextField("#id", "1", Field.Store.YES));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+ final ArrayList<String> fieldsOrder = new ArrayList<String>();
+ searcher.doc(topDocs.scoreDocs[0].doc, new StoredFieldVisitor() {
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+ fieldsOrder.add(fieldInfo.name);
+ return Status.YES;
+ }
+ });
+
+ assertThat(fieldsOrder.size(), equalTo(2));
+ assertThat(fieldsOrder.get(0), equalTo("_id"));
+ assertThat(fieldsOrder.get(1), equalTo("#id"));
+
+ indexWriter.close();
+ }
+
+ @Test
+ public void testBoost() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ for (int i = 0; i < 100; i++) {
+ // TODO (just setting the boost value does not seem to work...)
+ StringBuilder value = new StringBuilder().append("value");
+ for (int j = 0; j < i; j++) {
+ value.append(" ").append("value");
+ }
+ Document document = new Document();
+ TextField textField = new TextField("_id", Integer.toString(i), Field.Store.YES);
+ textField.setBoost(i);
+ document.add(textField);
+ textField = new TextField("value", value.toString(), Field.Store.YES);
+ textField.setBoost(i);
+ document.add(textField);
+ indexWriter.addDocument(document);
+ }
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TermQuery query = new TermQuery(new Term("value", "value"));
+ TopDocs topDocs = searcher.search(query, 100);
+ assertThat(100, equalTo(topDocs.totalHits));
+ for (int i = 0; i < topDocs.scoreDocs.length; i++) {
+ Document doc = searcher.doc(topDocs.scoreDocs[i].doc);
+// System.out.println(doc.get("id") + ": " + searcher.explain(query, topDocs.scoreDocs[i].doc));
+ assertThat(doc.get("_id"), equalTo(Integer.toString(100 - i - 1)));
+ }
+
+ indexWriter.close();
+ }
+
+ @Test
+ public void testNRTSearchOnClosedWriter() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+ DirectoryReader reader = DirectoryReader.open(indexWriter, true);
+
+ for (int i = 0; i < 100; i++) {
+ Document document = new Document();
+ TextField field = new TextField("_id", Integer.toString(i), Field.Store.YES);
+ field.setBoost(i);
+ document.add(field);
+ indexWriter.addDocument(document);
+ }
+ reader = refreshReader(reader);
+
+ indexWriter.close();
+
+ TermsEnum termDocs = SlowCompositeReaderWrapper.wrap(reader).terms("_id").iterator(null);
+ termDocs.next();
+ }
+
+ /**
+ * Verifies that term frequencies are not stored for numeric fields by default:
+ * <tt>int1</tt> does not store term frequencies while <tt>int2</tt> does.
+ */
+ @Test
+ public void testNumericTermDocsFreqs() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document doc = new Document();
+ FieldType type = IntField.TYPE_NOT_STORED;
+ IntField field = new IntField("int1", 1, type);
+ doc.add(field);
+
+ type = new FieldType(IntField.TYPE_NOT_STORED);
+ type.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS);
+ type.freeze();
+
+ field = new IntField("int1", 1, type);
+ doc.add(field);
+
+ field = new IntField("int2", 1, type);
+ doc.add(field);
+
+ field = new IntField("int2", 1, type);
+ doc.add(field);
+
+ indexWriter.addDocument(doc);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ AtomicReader atomicReader = SlowCompositeReaderWrapper.wrap(reader);
+
+ Terms terms = atomicReader.terms("int1");
+ TermsEnum termsEnum = terms.iterator(null);
+ termsEnum.next();
+
+ DocsEnum termDocs = termsEnum.docs(atomicReader.getLiveDocs(), null);
+ assertThat(termDocs.nextDoc(), equalTo(0));
+ assertThat(termDocs.docID(), equalTo(0));
+ assertThat(termDocs.freq(), equalTo(1));
+
+ terms = atomicReader.terms("int2");
+ termsEnum = terms.iterator(termsEnum);
+ termsEnum.next();
+ termDocs = termsEnum.docs(atomicReader.getLiveDocs(), termDocs);
+ assertThat(termDocs.nextDoc(), equalTo(0));
+ assertThat(termDocs.docID(), equalTo(0));
+ assertThat(termDocs.freq(), equalTo(2));
+
+ reader.close();
+ indexWriter.close();
+ }
+
+ private DirectoryReader refreshReader(DirectoryReader reader) throws IOException {
+ DirectoryReader oldReader = reader;
+ reader = DirectoryReader.openIfChanged(reader);
+ if (reader != oldReader) {
+ oldReader.close();
+ }
+ return reader;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java b/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java
new file mode 100644
index 0000000..1d5e7cc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deps.lucene;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.vectorhighlight.CustomFieldQuery;
+import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class VectorHighlighterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testVectorHighlighter() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ FastVectorHighlighter highlighter = new FastVectorHighlighter();
+ String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, notNullValue());
+ assertThat(fragment, equalTo("the big <b>bad</b> dog"));
+ }
+
+ @Test
+ public void testVectorHighlighterPrefixQuery() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ FastVectorHighlighter highlighter = new FastVectorHighlighter();
+
+ PrefixQuery prefixQuery = new PrefixQuery(new Term("content", "ba"));
+ assertThat(prefixQuery.getRewriteMethod().getClass().getName(), equalTo(PrefixQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT.getClass().getName()));
+ String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(prefixQuery),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, nullValue());
+
+ prefixQuery.setRewriteMethod(PrefixQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+ Query rewriteQuery = prefixQuery.rewrite(reader);
+ fragment = highlighter.getBestFragment(highlighter.getFieldQuery(rewriteQuery),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, notNullValue());
+
+ // now check with the custom field query
+ prefixQuery = new PrefixQuery(new Term("content", "ba"));
+ assertThat(prefixQuery.getRewriteMethod().getClass().getName(), equalTo(PrefixQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT.getClass().getName()));
+ fragment = highlighter.getBestFragment(new CustomFieldQuery(prefixQuery, reader, highlighter),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, notNullValue());
+ }
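+
+ // Editorial note, not from the upstream source: the fast vector highlighter can only
+ // highlight concrete terms. Under the default constant-score rewrite a prefix query exposes
+ // none, hence the null fragment; rewriting it first, or handing it to CustomFieldQuery
+ // together with the reader so the prefix is flattened into real terms, makes a fragment
+ // come back.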
+
+ @Test
+ public void testVectorHighlighterNoStore() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new Field("content", "the big bad dog", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ FastVectorHighlighter highlighter = new FastVectorHighlighter();
+ String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, nullValue());
+ }
+
+ @Test
+ public void testVectorHighlighterNoTermVector() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ FastVectorHighlighter highlighter = new FastVectorHighlighter();
+ String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, nullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/discovery/DiscoveryTests.java b/src/test/java/org/elasticsearch/discovery/DiscoveryTests.java
new file mode 100644
index 0000000..6a3fc26
--- /dev/null
+++ b/src/test/java/org/elasticsearch/discovery/DiscoveryTests.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope=Scope.SUITE, numNodes=2)
+public class DiscoveryTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.settingsBuilder().put("discovery.zen.ping.multicast.enabled", false)
+ .put("discovery.zen.ping.unicast.hosts", "localhost").put(super.nodeSettings(nodeOrdinal)).build();
+ }
+
+ @Test
+ @LuceneTestCase.AwaitsFix(bugUrl = "Proposed fix: Each node maintains a list of endpoints that have pinged it " +
+ "(UnicastZenPing#temporalResponses), a node will remove entries that are old. We can use this list to extend " +
+ "'discovery.zen.ping.unicast.hosts' list of nodes to ping. If we do this then in the test both nodes will ping each " +
+ "other, like in solution 1. The upside compared to solution 1, is that it won't go and ping 100 endpoints (based on the default port range), " +
+ "just other nodes that have pinged it in addition to the already configured nodes in the 'discovery.zen.ping.unicast.hosts' list.")
+ public void testUnicastDiscovery() {
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(2));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(2));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPingTests.java b/src/test/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPingTests.java
new file mode 100644
index 0000000..ad98754
--- /dev/null
+++ b/src/test/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPingTests.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.ping.multicast;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
+import org.elasticsearch.discovery.zen.ping.ZenPing;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.local.LocalTransport;
+import org.junit.Test;
+
+import java.net.DatagramPacket;
+import java.net.InetAddress;
+import java.net.MulticastSocket;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class MulticastZenPingTests extends ElasticsearchTestCase {
+
+ private Settings buildRandomMulticast(Settings settings) {
+ ImmutableSettings.Builder builder = ImmutableSettings.builder().put(settings);
+ builder.put("discovery.zen.ping.multicast.group", "224.2.3." + randomIntBetween(0, 255));
+ builder.put("discovery.zen.ping.multicast.port", randomIntBetween(55000, 56000));
+ return builder.build();
+ }
+
+ @Test
+ public void testSimplePings() {
+ Settings settings = ImmutableSettings.EMPTY;
+ settings = buildRandomMulticast(settings);
+
+ ThreadPool threadPool = new ThreadPool();
+ ClusterName clusterName = new ClusterName("test");
+ final TransportService transportServiceA = new TransportService(new LocalTransport(settings, threadPool, Version.CURRENT), threadPool).start();
+ final DiscoveryNode nodeA = new DiscoveryNode("A", transportServiceA.boundAddress().publishAddress(), Version.CURRENT);
+
+ final TransportService transportServiceB = new TransportService(new LocalTransport(settings, threadPool, Version.CURRENT), threadPool).start();
+ final DiscoveryNode nodeB = new DiscoveryNode("B", transportServiceB.boundAddress().publishAddress(), Version.CURRENT);
+
+ MulticastZenPing zenPingA = new MulticastZenPing(threadPool, transportServiceA, clusterName, Version.CURRENT);
+ zenPingA.setNodesProvider(new DiscoveryNodesProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeA).localNodeId("A").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+ });
+ zenPingA.start();
+
+ MulticastZenPing zenPingB = new MulticastZenPing(threadPool, transportServiceB, clusterName, Version.CURRENT);
+ zenPingB.setNodesProvider(new DiscoveryNodesProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeB).localNodeId("B").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+ });
+ zenPingB.start();
+
+ try {
+ ZenPing.PingResponse[] pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1));
+ assertThat(pingResponses.length, equalTo(1));
+ assertThat(pingResponses[0].target().id(), equalTo("B"));
+ } finally {
+ zenPingA.close();
+ zenPingB.close();
+ transportServiceA.close();
+ transportServiceB.close();
+ threadPool.shutdown();
+ }
+ }
+
+ @Test
+ public void testExternalPing() throws Exception {
+ Settings settings = ImmutableSettings.EMPTY;
+ settings = buildRandomMulticast(settings);
+
+ ThreadPool threadPool = new ThreadPool();
+ ClusterName clusterName = new ClusterName("test");
+ final TransportService transportServiceA = new TransportService(new LocalTransport(settings, threadPool, Version.CURRENT), threadPool).start();
+ final DiscoveryNode nodeA = new DiscoveryNode("A", transportServiceA.boundAddress().publishAddress(), Version.CURRENT);
+
+ MulticastZenPing zenPingA = new MulticastZenPing(threadPool, transportServiceA, clusterName, Version.CURRENT);
+ zenPingA.setNodesProvider(new DiscoveryNodesProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeA).localNodeId("A").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+ });
+ zenPingA.start();
+
+ MulticastSocket multicastSocket = null;
+ try {
+ Loggers.getLogger(MulticastZenPing.class).setLevel("TRACE");
+ multicastSocket = new MulticastSocket(54328);
+ multicastSocket.setReceiveBufferSize(2048);
+ multicastSocket.setSendBufferSize(2048);
+ multicastSocket.setSoTimeout(60000);
+
+ DatagramPacket datagramPacket = new DatagramPacket(new byte[2048], 2048, InetAddress.getByName("224.2.2.4"), 54328);
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("request").field("cluster_name", "test").endObject().endObject();
+ datagramPacket.setData(builder.bytes().toBytes());
+ multicastSocket.send(datagramPacket);
+ Thread.sleep(100);
+ } finally {
+ Loggers.getLogger(MulticastZenPing.class).setLevel("INFO");
+ if (multicastSocket != null) multicastSocket.close();
+ zenPingA.close();
+ threadPool.shutdown();
+ }
+ }
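+
+ // Editorial note, not from the upstream source: 224.2.2.4:54328 is the default multicast
+ // group and port that MulticastZenPing listens on, so the test above pings it from a plain
+ // MulticastSocket with a hand-built body of the shape {"request":{"cluster_name":"test"}}.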
+}
diff --git a/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java b/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java
new file mode 100644
index 0000000..7a553e2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.ping.unicast;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
+import org.elasticsearch.discovery.zen.ping.ZenPing;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.netty.NettyTransport;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class UnicastZenPingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimplePings() {
+ Settings settings = ImmutableSettings.EMPTY;
+ int startPort = 11000 + randomIntBetween(0, 1000);
+ int endPort = startPort + 10;
+ settings = ImmutableSettings.builder().put(settings).put("transport.tcp.port", startPort + "-" + endPort).build();
+
+ ThreadPool threadPool = new ThreadPool();
+ ClusterName clusterName = new ClusterName("test");
+ NetworkService networkService = new NetworkService(settings);
+
+ NettyTransport transportA = new NettyTransport(settings, threadPool, networkService, Version.CURRENT);
+ final TransportService transportServiceA = new TransportService(transportA, threadPool).start();
+ final DiscoveryNode nodeA = new DiscoveryNode("UZP_A", transportServiceA.boundAddress().publishAddress(), Version.CURRENT);
+
+ InetSocketTransportAddress addressA = (InetSocketTransportAddress) transportA.boundAddress().publishAddress();
+
+ NettyTransport transportB = new NettyTransport(settings, threadPool, networkService, Version.CURRENT);
+ final TransportService transportServiceB = new TransportService(transportB, threadPool).start();
+ final DiscoveryNode nodeB = new DiscoveryNode("UZP_B", transportServiceB.boundAddress().publishAddress(), Version.CURRENT);
+
+ InetSocketTransportAddress addressB = (InetSocketTransportAddress) transportB.boundAddress().publishAddress();
+
+ Settings hostsSettings = ImmutableSettings.settingsBuilder().putArray("discovery.zen.ping.unicast.hosts",
+ addressA.address().getAddress().getHostAddress() + ":" + addressA.address().getPort(),
+ addressB.address().getAddress().getHostAddress() + ":" + addressB.address().getPort())
+ .build();
+
+ UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, transportServiceA, clusterName, Version.CURRENT, null);
+ zenPingA.setNodesProvider(new DiscoveryNodesProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeA).localNodeId("UZP_A").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+ });
+ zenPingA.start();
+
+ UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, transportServiceB, clusterName, Version.CURRENT, null);
+ zenPingB.setNodesProvider(new DiscoveryNodesProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeB).localNodeId("UZP_B").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+ });
+ zenPingB.start();
+
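+ // pinging from A should yield exactly one response, from node B; a node does not report itself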
+ try {
+ ZenPing.PingResponse[] pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1));
+ assertThat(pingResponses.length, equalTo(1));
+ assertThat(pingResponses[0].target().id(), equalTo("UZP_B"));
+ } finally {
+ zenPingA.close();
+ zenPingB.close();
+ transportServiceA.close();
+ transportServiceB.close();
+ threadPool.shutdown();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsTests.java b/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsTests.java
new file mode 100644
index 0000000..727649e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsTests.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.document;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+/**
+ *
+ */
+public class AliasedIndexDocumentActionsTests extends DocumentActionsTests {
+
+ protected void createIndex() {
+ logger.info("Creating index [test1] with alias [test]");
+ try {
+ client().admin().indices().prepareDelete("test1").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ logger.info("--> creating index test");
+ client().admin().indices().create(createIndexRequest("test1").settings(settingsBuilder().putArray("index.aliases", "test"))).actionGet();
+ }
+
+ @Override
+ protected String getConcreteIndexName() {
+ return "test1";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/document/BulkTests.java b/src/test/java/org/elasticsearch/document/BulkTests.java
new file mode 100644
index 0000000..53f9472
--- /dev/null
+++ b/src/test/java/org/elasticsearch/document/BulkTests.java
@@ -0,0 +1,593 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.document;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.CyclicBarrier;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class BulkTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBulkUpdate_simple() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ ).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
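+ // seed five documents; the create flag on id 2 additionally asserts a fresh insert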
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("1").setSource("field", 1))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("2").setSource("field", 2).setCreate(true))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("3").setSource("field", 3))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("4").setSource("field", 4))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("5").setSource("field", 5))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(5));
+
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("1").setScript("ctx._source.field += 1"))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("2").setScript("ctx._source.field += 1").setRetryOnConflict(3))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("3").setDoc(jsonBuilder().startObject().field("field1", "test").endObject()))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getId(), equalTo("1"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getId(), equalTo("2"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getId(), equalTo("3"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(2l));
+
+ GetResponse getResponse = client().prepareGet().setIndex("test").setType("type1").setId("1").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(2l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(2l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(2l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(3l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("3").setFields("field1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(2l));
+ assertThat(getResponse.getField("field1").getValue().toString(), equalTo("test"));
+
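+ // id 6 carries an upsert document for the script to run against; id 7 has none and must fail with DocumentMissingException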
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("6").setScript("ctx._source.field += 1")
+ .setUpsert(jsonBuilder().startObject().field("field", 0).endObject()))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("7").setScript("ctx._source.field += 1"))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("2").setScript("ctx._source.field += 1"))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(true));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getId(), equalTo("6"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(1l));
+ assertThat(bulkResponse.getItems()[1].getResponse(), nullValue());
+ assertThat(bulkResponse.getItems()[1].getFailure().getId(), equalTo("7"));
+ assertThat(bulkResponse.getItems()[1].getFailure().getMessage(), containsString("DocumentMissingException"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getId(), equalTo("2"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("6").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(1l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(0l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("7").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(3l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(4l));
+ }
+
+ @Test
+ public void testBulkVersioning() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field", "1"))
+ .add(client().prepareIndex("test", "type", "2").setCreate(true).setSource("field", "1"))
+ .add(client().prepareIndex("test", "type", "1").setSource("field", "2")).get();
+
+ assertTrue(((IndexResponse) bulkResponse.getItems()[0].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(1l));
+ assertTrue(((IndexResponse) bulkResponse.getItems()[1].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(1l));
+ assertFalse(((IndexResponse) bulkResponse.getItems()[2].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(2l));
+
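+ // internal versioning: the stale version (4) must be rejected, while the unversioned and correctly versioned updates succeed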
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate("test", "type", "1").setVersion(4l).setDoc("field", "2"))
+ .add(client().prepareUpdate("test", "type", "2").setDoc("field", "2"))
+ .add(client().prepareUpdate("test", "type", "1").setVersion(2l).setDoc("field", "3")).get();
+
+ assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("Version"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l));
+
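+ // external versioning: the versions supplied on the requests are stored verbatim instead of being incremented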
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex("test", "type", "e1").setCreate(true).setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
+ .add(client().prepareIndex("test", "type", "e2").setCreate(true).setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
+ .add(client().prepareIndex("test", "type", "e1").setSource("field", "2").setVersion(12).setVersionType(VersionType.EXTERNAL)).get();
+
+ assertTrue(((IndexResponse) bulkResponse.getItems()[0].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(10l));
+ assertTrue(((IndexResponse) bulkResponse.getItems()[1].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(10l));
+ assertFalse(((IndexResponse) bulkResponse.getItems()[2].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(12l));
+
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate("test", "type", "e1").setVersion(4l).setDoc("field", "2").setVersion(10).setVersionType(VersionType.EXTERNAL))
+ .add(client().prepareUpdate("test", "type", "e2").setDoc("field", "2").setVersion(15).setVersionType(VersionType.EXTERNAL))
+ .add(client().prepareUpdate("test", "type", "e1").setVersion(2l).setDoc("field", "3").setVersion(15).setVersionType(VersionType.EXTERNAL)).get();
+
+ assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("Version"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(15l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(15l));
+ }
+
+ @Test
+ public void testBulkUpdate_malformedScripts() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ ).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("1").setSource("field", 1))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("2").setSource("field", 1))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("3").setSource("field", 1))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+
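+ // the scripts for ids 1 and 3 reference an undefined variable on purpose; only the update of id 2 should succeed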
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("1").setScript("ctx._source.field += a").setFields("field"))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("2").setScript("ctx._source.field += 1").setFields("field"))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("3").setScript("ctx._source.field += a").setFields("field"))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(true));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+ assertThat(bulkResponse.getItems()[0].getFailure().getId(), equalTo("1"));
+ assertThat(bulkResponse.getItems()[0].getFailure().getMessage(), containsString("failed to execute script"));
+ assertThat(bulkResponse.getItems()[0].getResponse(), nullValue());
+
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getId(), equalTo("2"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((Integer) ((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getGetResult().field("field").getValue()), equalTo(2));
+ assertThat(bulkResponse.getItems()[1].getFailure(), nullValue());
+
+ assertThat(bulkResponse.getItems()[2].getFailure().getId(), equalTo("3"));
+ assertThat(bulkResponse.getItems()[2].getFailure().getMessage(), containsString("failed to execute script"));
+ assertThat(bulkResponse.getItems()[2].getResponse(), nullValue());
+ }
+
+ @Test
+ public void testBulkUpdate_largerVolume() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ ).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ int numDocs = 2000;
+ BulkRequestBuilder builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ builder.add(
+ client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i))
+ .setScript("ctx._source.counter += 1").setFields("counter")
+ .setUpsert(jsonBuilder().startObject().field("counter", 1).endObject())
+ );
+ }
+
+ BulkResponse response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getVersion(), equalTo(1l));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getId(), equalTo(Integer.toString(i)));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getVersion(), equalTo(1l));
+ assertThat(((Integer) ((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue()), equalTo(1));
+
+ for (int j = 0; j < 5; j++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).setFields("counter").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(1l));
+ assertThat((Long) getResponse.getField("counter").getValue(), equalTo(1l));
+ }
+ }
+
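+ // second pass alternates scripted and partial-doc updates, adding retry_on_conflict to every third request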
+ builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ UpdateRequestBuilder updateBuilder = client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i)).setFields("counter");
+ if (i % 2 == 0) {
+ updateBuilder.setScript("ctx._source.counter += 1");
+ } else {
+ updateBuilder.setDoc(jsonBuilder().startObject().field("counter", 2).endObject());
+ }
+ if (i % 3 == 0) {
+ updateBuilder.setRetryOnConflict(3);
+ }
+
+ builder.add(updateBuilder);
+ }
+
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getVersion(), equalTo(2l));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getId(), equalTo(Integer.toString(i)));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((Integer) ((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue()), equalTo(2));
+ }
+
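+ // third pass runs half a range past the indexed ids, so the out-of-range updates must fail with DocumentMissingException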
+ builder = client().prepareBulk();
+ int maxDocs = numDocs / 2 + numDocs;
+ for (int i = (numDocs / 2); i < maxDocs; i++) {
+ builder.add(
+ client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i)).setScript("ctx._source.counter += 1")
+ );
+ }
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(true));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ int id = i + (numDocs / 2);
+ if (i >= (numDocs / 2)) {
+ assertThat(response.getItems()[i].getFailure().getId(), equalTo(Integer.toString(id)));
+ assertThat(response.getItems()[i].getFailure().getMessage(), containsString("DocumentMissingException"));
+ } else {
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(id)));
+ assertThat(response.getItems()[i].getVersion(), equalTo(3l));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ }
+ }
+
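+ // ctx.op = "none" leaves the documents untouched but each item is still reported as an update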
+ builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ builder.add(
+ client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i)).setScript("ctx.op = \"none\"")
+ );
+ }
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getItemId(), equalTo(i));
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ }
+
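+ // ctx.op = "delete" removes the documents through the update API; the follow-up gets verify they are gone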
+ builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ builder.add(
+ client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i)).setScript("ctx.op = \"delete\"")
+ );
+ }
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getItemId(), equalTo(i));
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ for (int j = 0; j < 5; j++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).setFields("counter").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ }
+ }
+ }
+
+ @Test
+ public void testBulkIndexingWhileInitializing() throws Exception {
+
+ int shards = 1 + randomInt(10);
+ int replica = randomInt(2);
+
+ cluster().ensureAtLeastNumNodes(1 + replica);
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", shards)
+ .put("index.number_of_replicas", replica)
+ ).execute().actionGet();
+
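+ // deliberately no wait for green: the bulks start while replica shards may still be initializing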
+ int numDocs = 5000;
+ int bulk = 50;
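+ // i is advanced in the inner loop, so each outer iteration sends one bulk of 50 documents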
+ for (int i = 0; i < numDocs; ) {
+ BulkRequestBuilder builder = client().prepareBulk();
+ for (int j = 0; j < bulk; j++, i++) {
+ builder.add(client().prepareIndex("test", "type1", Integer.toString(i)).setSource("val", i));
+ }
+ logger.info("bulk indexing {}-{}", i - bulk, i - 1);
+ BulkResponse response = builder.get();
+ if (response.hasFailures()) {
+ fail(response.buildFailureMessage());
+ }
+ }
+
+ refresh();
+
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ }
+
+ /*
+ Test for https://github.com/elasticsearch/elasticsearch/issues/3444
+ */
+ @Test
+ public void testBulkUpdateDocAsUpsertWithParent() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("parent", "{\"parent\":{}}")
+ .addMapping("child", "{\"child\": {\"_parent\": {\"type\": \"parent\"}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ BulkRequestBuilder builder = client().prepareBulk();
+
+ byte[] addParent = new BytesArray("{\"index\" : { \"_index\" : \"test\", \"_type\" : \"parent\", \"_id\" : \"parent1\"}}\n" +
+ "{\"field1\" : \"value1\"}\n").array();
+
+ byte[] addChild = new BytesArray("{ \"update\" : { \"_index\" : \"test\", \"_type\" : \"child\", \"_id\" : \"child1\", \"parent\" : \"parent1\"}}\n" +
+ "{\"doc\" : { \"field1\" : \"value1\"}, \"doc_as_upsert\" : \"true\"}\n").array();
+
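+ // the update action names its parent in the metadata line; doc_as_upsert creates the child when it does not exist yet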
+ builder.add(addParent, 0, addParent.length, false);
+ builder.add(addChild, 0, addChild.length, false);
+
+ BulkResponse bulkResponse = builder.get();
+ assertThat(bulkResponse.getItems().length, equalTo(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
+
+ client().admin().indices().prepareRefresh("test").get();
+
+ //we check that the _parent field was set on the child document by using the has parent query
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.hasParentQuery("parent", QueryBuilders.matchAllQuery()))
+ .get();
+
+ assertNoFailures(searchResponse);
+ assertSearchHits(searchResponse, "child1");
+ }
+
+ /*
+ Test for https://github.com/elasticsearch/elasticsearch/issues/3444
+ */
+ @Test
+ public void testBulkUpdateUpsertWithParent() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("parent", "{\"parent\":{}}")
+ .addMapping("child", "{\"child\": {\"_parent\": {\"type\": \"parent\"}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ BulkRequestBuilder builder = client().prepareBulk();
+
+ byte[] addParent = new BytesArray("{\"index\" : { \"_index\" : \"test\", \"_type\" : \"parent\", \"_id\" : \"parent1\"}}\n" +
+ "{\"field1\" : \"value1\"}\n").array();
+
+ byte[] addChild = new BytesArray("{\"update\" : { \"_id\" : \"child1\", \"_type\" : \"child\", \"_index\" : \"test\", \"parent\" : \"parent1\"} }\n" +
+ "{ \"script\" : \"ctx._source.field2 = 'value2'\", \"upsert\" : {\"field1\" : \"value1\"}}\n").array();
+
+ builder.add(addParent, 0, addParent.length, false);
+ builder.add(addChild, 0, addChild.length, false);
+
+ BulkResponse bulkResponse = builder.get();
+ assertThat(bulkResponse.getItems().length, equalTo(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
+
+ client().admin().indices().prepareRefresh("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.hasParentQuery("parent", QueryBuilders.matchAllQuery()))
+ .get();
+
+ assertNoFailures(searchResponse);
+ assertSearchHits(searchResponse, "child1");
+ }
+
+ @Test
+ public void testFailingVersionedUpdatedOnBulk() throws Exception {
+ createIndex("test");
+ index("test","type","1","field","1");
+ final BulkResponse[] responses = new BulkResponse[30];
+ final CyclicBarrier cyclicBarrier = new CyclicBarrier(responses.length);
+ Thread[] threads = new Thread[responses.length];
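+ // every thread sends the same update conditioned on version 1; the barrier releases them together so they race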
+
+ for (int i = 0; i < responses.length; i++) {
+ final int threadID = i;
+ threads[threadID] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ cyclicBarrier.await();
+ } catch (Exception e) {
+ return;
+ }
+ BulkRequestBuilder requestBuilder = client().prepareBulk();
+ requestBuilder.add(client().prepareUpdate("test", "type", "1").setVersion(1).setDoc("field", threadID));
+ responses[threadID] = requestBuilder.get();
+ }
+ });
+ threads[threadID].start();
+ }
+
+ for (int i=0;i < threads.length; i++) {
+ threads[i].join();
+ }
+
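+ // exactly one racer can match version 1; all the others must hit a version conflict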
+ int successes = 0;
+ for (BulkResponse response : responses) {
+ if (!response.hasFailures()) successes++;
+ }
+
+ assertThat(successes, equalTo(1));
+ }
+
+ @Test // issue 4745
+ public void preParsingSourceDueToMappingShouldNotBreakCompleteBulkRequest() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", true)
+ .field("path", "last_modified")
+ .endObject()
+ .endObject()
+ .endObject();
+ CreateIndexResponse createIndexResponse = prepareCreate("test").addMapping("type", builder).get();
+ assertAcked(createIndexResponse);
+
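+ // the first source line is deliberately malformed JSON; only that item should fail, not the whole bulk request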
+ String brokenBuildRequestData = "{\"index\": {\"_id\": \"1\"}}\n" +
+ "{\"name\": \"Malformed}\n" +
+ "{\"index\": {\"_id\": \"2\"}}\n" +
+ "{\"name\": \"Good\", \"last_modified\" : \"2013-04-05\"}\n";
+
+ byte[] bulkData = brokenBuildRequestData.getBytes(Charsets.UTF_8);
+ BulkResponse bulkResponse = client().prepareBulk().add(bulkData, 0, bulkData.length, false, "test", "type").setRefresh(true).get();
+ assertThat(bulkResponse.getItems().length, is(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), is(true));
+ assertThat(bulkResponse.getItems()[1].isFailed(), is(false));
+
+ assertExists(get("test", "type", "2"));
+ }
+
+ @Test // issue 4745
+ public void preParsingSourceDueToRoutingShouldNotBreakCompleteBulkRequest() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_routing")
+ .field("required", true)
+ .field("path", "my_routing")
+ .endObject()
+ .endObject()
+ .endObject();
+ CreateIndexResponse createIndexResponse = prepareCreate("test").addMapping("type", builder).get();
+ assertAcked(createIndexResponse);
+ ensureYellow("test");
+
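+ // same malformed-first-line pattern, this time with the required _routing value pulled from the document source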
+ String brokenBuildRequestData = "{\"index\": {} }\n" +
+ "{\"name\": \"Malformed}\n" +
+ "{\"index\": { \"_id\" : \"24000\" } }\n" +
+ "{\"name\": \"Good\", \"my_routing\" : \"48000\"}\n";
+
+ byte[] bulkData = brokenBuildRequestData.getBytes(Charsets.UTF_8);
+ BulkResponse bulkResponse = client().prepareBulk().add(bulkData, 0, bulkData.length, false, "test", "type").setRefresh(true).get();
+ assertThat(bulkResponse.getItems().length, is(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), is(true));
+ assertThat(bulkResponse.getItems()[1].isFailed(), is(false));
+
+ assertExists(client().prepareGet("test", "type", "24000").setRouting("48000").get());
+ }
+
+ @Test // issue 4745
+ public void preParsingSourceDueToIdShouldNotBreakCompleteBulkRequest() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_id")
+ .field("path", "my_id")
+ .endObject()
+ .endObject()
+ .endObject();
+ CreateIndexResponse createIndexResponse = prepareCreate("test").addMapping("type", builder).get();
+ assertAcked(createIndexResponse);
+ ensureYellow("test");
+
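+ // here the id itself comes from the document source (path my_id), so the source must be parsed before routing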
+ String brokenBuildRequestData = "{\"index\": {} }\n" +
+ "{\"name\": \"Malformed}\n" +
+ "{\"index\": {} }\n" +
+ "{\"name\": \"Good\", \"my_id\" : \"48\"}\n";
+
+ byte[] bulkData = brokenBuildRequestData.getBytes(Charsets.UTF_8);
+ BulkResponse bulkResponse = client().prepareBulk().add(bulkData, 0, bulkData.length, false, "test", "type").setRefresh(true).get();
+ assertThat(bulkResponse.getItems().length, is(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), is(true));
+ assertThat(bulkResponse.getItems()[1].isFailed(), is(false));
+
+ assertExists(get("test", "type", "48"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/document/DocumentActionsTests.java b/src/test/java/org/elasticsearch/document/DocumentActionsTests.java
new file mode 100644
index 0000000..ee9eb8d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/document/DocumentActionsTests.java
@@ -0,0 +1,291 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.document;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;
+import org.elasticsearch.action.support.replication.ReplicationType;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class DocumentActionsTests extends ElasticsearchIntegrationTest {
+
+ protected void createIndex() {
+ cluster().wipeIndices(getConcreteIndexName());
+ createIndex(getConcreteIndexName());
+ }
+
+ protected String getConcreteIndexName() {
+ return "test";
+ }
+
+ @Test
+ public void testIndexActions() throws Exception {
+ createIndex();
+ logger.info("Running Cluster Health");
+ ensureGreen();
+ logger.info("Indexing [type1/1]");
+ IndexResponse indexResponse = client().prepareIndex().setIndex("test").setType("type1").setId("1").setSource(source("1", "test")).setRefresh(true).execute().actionGet();
+ assertThat(indexResponse.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(indexResponse.getId(), equalTo("1"));
+ assertThat(indexResponse.getType(), equalTo("type1"));
+ logger.info("Refreshing");
+ RefreshResponse refreshResponse = refresh();
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(10));
+
+ logger.info("--> index exists?");
+ assertThat(indexExists(getConcreteIndexName()), equalTo(true));
+ logger.info("--> index exists?, fake index");
+ assertThat(indexExists("test1234565"), equalTo(false));
+
+ logger.info("Clearing cache");
+ ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().clearCache(clearIndicesCacheRequest("test").recycler(true).fieldDataCache(true).filterCache(true).idCache(true)).actionGet();
+ assertNoFailures(clearIndicesCacheResponse);
+ assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(10));
+
+ logger.info("Optimizing");
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ OptimizeResponse optimizeResponse = optimize();
+ assertThat(optimizeResponse.getSuccessfulShards(), equalTo(10));
+
+ GetResponse getResult;
+
+ logger.info("Get [type1/1]");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().prepareGet("test", "type1", "1").setOperationThreaded(false).execute().actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
+ assertThat("cycle(map) #" + i, (String) ((Map) getResult.getSourceAsMap().get("type1")).get("name"), equalTo("test"));
+ getResult = client().get(getRequest("test").type("type1").id("1").operationThreaded(true)).actionGet();
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ }
+
+ logger.info("Get [type1/1] with script");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().prepareGet("test", "type1", "1").setFields("type1.name").execute().actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(getResult.isExists(), equalTo(true));
+ assertThat(getResult.getSourceAsBytes(), nullValue());
+ assertThat(getResult.getField("type1.name").getValues().get(0).toString(), equalTo("test"));
+ }
+
+ logger.info("Get [type1/2] (should be empty)");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat(getResult.isExists(), equalTo(false));
+ }
+
+ logger.info("Delete [type1/1]");
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "1").setReplicationType(ReplicationType.SYNC).execute().actionGet();
+ assertThat(deleteResponse.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(deleteResponse.getId(), equalTo("1"));
+ assertThat(deleteResponse.getType(), equalTo("type1"));
+ logger.info("Refreshing");
+ client().admin().indices().refresh(refreshRequest("test")).actionGet();
+
+ logger.info("Get [type1/1] (should be empty)");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.isExists(), equalTo(false));
+ }
+
+ logger.info("Index [type1/1]");
+ client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ logger.info("Index [type1/2]");
+ client().index(indexRequest("test").type("type1").id("2").source(source("2", "test2"))).actionGet();
+
+ logger.info("Flushing");
+ FlushResponse flushResult = client().admin().indices().prepareFlush("test").execute().actionGet();
+ assertThat(flushResult.getSuccessfulShards(), equalTo(10));
+ assertThat(flushResult.getFailedShards(), equalTo(0));
+ logger.info("Refreshing");
+ client().admin().indices().refresh(refreshRequest("test")).actionGet();
+
+ logger.info("Get [type1/1] and [type1/2]");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
+ getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ String ste1 = getResult.getSourceAsString();
+ String ste2 = source("2", "test2").string();
+ assertThat("cycle #" + i, ste1, equalTo(ste2));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ }
+
+ logger.info("Count");
+ // check count
+ for (int i = 0; i < 5; i++) {
+ // test successful
+ CountResponse countResponse = client().prepareCount("test").setQuery(termQuery("_type", "type1")).setOperationThreading(BroadcastOperationThreading.NO_THREADS).execute().actionGet();
+ assertNoFailures(countResponse);
+ assertThat(countResponse.getCount(), equalTo(2l));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+
+ countResponse = client().prepareCount("test")
+ .setQuery(termQuery("_type", "type1"))
+ .setOperationThreading(BroadcastOperationThreading.SINGLE_THREAD)
+ .get();
+ assertThat(countResponse.getCount(), equalTo(2l));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+
+ countResponse = client().prepareCount("test")
+ .setQuery(termQuery("_type", "type1"))
+ .setOperationThreading(BroadcastOperationThreading.THREAD_PER_SHARD).get();
+ assertThat(countResponse.getCount(), equalTo(2l));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+
+ // test failed (simply query that can't be parsed)
+ countResponse = client().count(countRequest("test").source("{ term : { _type : \"type1 } }".getBytes(Charsets.UTF_8))).actionGet();
+
+ assertThat(countResponse.getCount(), equalTo(0l));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(countResponse.getFailedShards(), equalTo(5));
+
+ // count with no query is a match all one
+ countResponse = client().prepareCount("test").execute().actionGet();
+ assertThat("Failures " + countResponse.getShardFailures(), countResponse.getShardFailures() == null ? 0 : countResponse.getShardFailures().length, equalTo(0));
+ assertThat(countResponse.getCount(), equalTo(2l));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+ }
+
+ logger.info("Delete by query");
+ DeleteByQueryResponse queryResponse = client().prepareDeleteByQuery().setIndices("test").setQuery(termQuery("name", "test2")).execute().actionGet();
+ assertThat(queryResponse.getIndex(getConcreteIndexName()).getSuccessfulShards(), equalTo(5));
+ assertThat(queryResponse.getIndex(getConcreteIndexName()).getFailedShards(), equalTo(0));
+ client().admin().indices().refresh(refreshRequest("test")).actionGet();
+
+ logger.info("Get [type1/1] and [type1/2], should be empty");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
+ getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat("cycle #" + i, getResult.isExists(), equalTo(false));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ }
+ }
+
+ @Test
+ public void testBulk() throws Exception {
+ createIndex();
+ logger.info("-> running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
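+ // one bulk mixing index, create, auto-id index, delete and one intentionally broken source document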
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("1").setSource(source("1", "test")))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("2").setSource(source("2", "test")).setCreate(true))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setSource(source("3", "test")))
+ .add(client().prepareDelete().setIndex("test").setType("type1").setId("1"))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setSource("{ xxx }")) // failure
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(true));
+ assertThat(bulkResponse.getItems().length, equalTo(5));
+
+ assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[0].getOpType(), equalTo("index"));
+ assertThat(bulkResponse.getItems()[0].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[0].getType(), equalTo("type1"));
+ assertThat(bulkResponse.getItems()[0].getId(), equalTo("1"));
+
+ assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[1].getOpType(), equalTo("create"));
+ assertThat(bulkResponse.getItems()[1].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[1].getType(), equalTo("type1"));
+ assertThat(bulkResponse.getItems()[1].getId(), equalTo("2"));
+
+ assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[2].getOpType(), equalTo("create"));
+ assertThat(bulkResponse.getItems()[2].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[2].getType(), equalTo("type1"));
+ String generatedId3 = bulkResponse.getItems()[2].getId();
+
+ assertThat(bulkResponse.getItems()[3].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[3].getOpType(), equalTo("delete"));
+ assertThat(bulkResponse.getItems()[3].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[3].getType(), equalTo("type1"));
+ assertThat(bulkResponse.getItems()[3].getId(), equalTo("1"));
+
+ assertThat(bulkResponse.getItems()[4].isFailed(), equalTo(true));
+ assertThat(bulkResponse.getItems()[4].getOpType(), equalTo("create"));
+ assertThat(bulkResponse.getItems()[4].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[4].getType(), equalTo("type1"));
+
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().actionGet();
+ assertNoFailures(refreshResponse);
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(10));
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat("cycle #" + i, getResult.isExists(), equalTo(false));
+
+ getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("2", "test").string()));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+
+ getResult = client().get(getRequest("test").type("type1").id(generatedId3)).actionGet();
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("3", "test").string()));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ }
+ }
+
+ private XContentBuilder source(String id, String nameValue) throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("type1").field("id", id).field("name", nameValue).endObject().endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/explain/ExplainActionTests.java b/src/test/java/org/elasticsearch/explain/ExplainActionTests.java
new file mode 100644
index 0000000..31473ee
--- /dev/null
+++ b/src/test/java/org/elasticsearch/explain/ExplainActionTests.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.explain;
+
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.queryString;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class ExplainActionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimple() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(
+ ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)
+ ).execute().actionGet();
+ client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "test", "1")
+ .setSource("field", "value1")
+ .execute().actionGet();
+
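+ // refresh_interval is -1 and explain is not realtime, so the fresh document stays invisible until an explicit refresh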
+ ExplainResponse response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .execute().actionGet();
+ assertNotNull(response);
+ assertFalse(response.isExists()); // not visible yet
+ assertFalse(response.isMatch()); // and therefore no match
+
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .execute().actionGet();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertTrue(response.getExplanation().isMatch());
+ assertThat(response.getExplanation().getValue(), equalTo(1.0f));
+
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.termQuery("field", "value2"))
+ .execute().actionGet();
+ assertNotNull(response);
+ assertTrue(response.isExists());
+ assertFalse(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertFalse(response.getExplanation().isMatch());
+
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.boolQuery()
+ .must(QueryBuilders.termQuery("field", "value1"))
+ .must(QueryBuilders.termQuery("field", "value2"))
+ )
+ .execute().actionGet();
+ assertNotNull(response);
+ assertTrue(response.isExists());
+ assertFalse(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertFalse(response.getExplanation().isMatch());
+ assertThat(response.getExplanation().getDetails().length, equalTo(2));
+
+ response = client().prepareExplain("test", "test", "2")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .execute().actionGet();
+ assertNotNull(response);
+ assertFalse(response.isExists());
+ assertFalse(response.isMatch());
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testExplainWithFields() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "test", "1")
+ .setSource(
+ jsonBuilder().startObject()
+ .startObject("obj1")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ ExplainResponse response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFields("obj1.field1")
+ .execute().actionGet();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertTrue(response.getExplanation().isMatch());
+ assertThat(response.getExplanation().getValue(), equalTo(1.0f));
+ assertThat(response.getGetResult().isExists(), equalTo(true));
+ assertThat(response.getGetResult().getId(), equalTo("1"));
+ assertThat(response.getGetResult().getFields().size(), equalTo(1));
+ assertThat(response.getGetResult().getFields().get("obj1.field1").getValue().toString(), equalTo("value1"));
+ assertThat(response.getGetResult().isSourceEmpty(), equalTo(true));
+
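+ // same request again, now also fetching _source alongside the stored field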
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFields("obj1.field1")
+ .setFetchSource(true)
+ .get();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertTrue(response.getExplanation().isMatch());
+ assertThat(response.getExplanation().getValue(), equalTo(1.0f));
+ assertThat(response.getGetResult().isExists(), equalTo(true));
+ assertThat(response.getGetResult().getId(), equalTo("1"));
+ assertThat(response.getGetResult().getFields().size(), equalTo(1));
+ assertThat(response.getGetResult().getFields().get("obj1.field1").getValue().toString(), equalTo("value1"));
+ assertThat(response.getGetResult().isSourceEmpty(), equalTo(false));
+
+ response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFields("obj1.field1", "obj1.field2")
+ .execute().actionGet();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ String v1 = (String) response.getGetResult().field("obj1.field1").getValue();
+ String v2 = (String) response.getGetResult().field("obj1.field2").getValue();
+ assertThat(v1, equalTo("value1"));
+ assertThat(v2, equalTo("value2"));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testExplainWithSource() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "test", "1")
+ .setSource(
+ jsonBuilder().startObject()
+ .startObject("obj1")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ ExplainResponse response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFetchSource("obj1.field1", null)
+ .get();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertTrue(response.getExplanation().isMatch());
+ assertThat(response.getExplanation().getValue(), equalTo(1.0f));
+ assertThat(response.getGetResult().isExists(), equalTo(true));
+ assertThat(response.getGetResult().getId(), equalTo("1"));
+ assertThat(response.getGetResult().getSource().size(), equalTo(1));
+ assertThat(((Map<String, Object>) response.getGetResult().getSource().get("obj1")).get("field1").toString(), equalTo("value1"));
+
+ response = client().prepareExplain("test", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFetchSource(null, "obj1.field2")
+ .execute().actionGet();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertThat(((Map<String, Object>) response.getGetResult().getSource().get("obj1")).get("field1").toString(), equalTo("value1"));
+ }
+
+ @Test
+ public void testExplainWithAlias() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+
+ client().admin().indices().prepareAliases().addAlias("test", "alias1", FilterBuilders.termFilter("field2", "value2"))
+ .execute().actionGet();
+ client().prepareIndex("test", "test", "1").setSource("field1", "value1", "field2", "value1").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ ExplainResponse response = client().prepareExplain("alias1", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .execute().actionGet();
+ assertNotNull(response);
+ assertTrue(response.isExists());
+ assertFalse(response.isMatch());
+ }
+
+ @Test
+ public void explainDateRangeInQueryString() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).get();
+
+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));
+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1));
+
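+ // "now-2M/d TO now/d" uses day-rounded date math; the "past" value one month back must fall inside that window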
+ client().prepareIndex("test", "type", "1").setSource("past", aMonthAgo, "future", aMonthFromNow).get();
+
+ refresh();
+
+ ExplainResponse explainResponse = client().prepareExplain("test", "type", "1").setQuery(queryString("past:[now-2M/d TO now/d]")).get();
+ assertThat(explainResponse.isExists(), equalTo(true));
+ assertThat(explainResponse.isMatch(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/flt/FuzzyLikeThisActionTests.java b/src/test/java/org/elasticsearch/flt/FuzzyLikeThisActionTests.java
new file mode 100644
index 0000000..c0b4930
--- /dev/null
+++ b/src/test/java/org/elasticsearch/flt/FuzzyLikeThisActionTests.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.flt;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.fuzzyLikeThisFieldQuery;
+import static org.elasticsearch.index.query.QueryBuilders.fuzzyLikeThisQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class FuzzyLikeThisActionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ // See issue https://github.com/elasticsearch/elasticsearch/issues/3252
+ public void testNumericField() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, between(1, 5))
+ .put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)))
+ .addMapping("type", "int_value", "type=integer"));
+ ensureGreen();
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("string_value", "lucene index").field("int_value", 1).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", "type", "2")
+ .setSource(jsonBuilder().startObject().field("string_value", "elasticsearch index").field("int_value", 42).endObject())
+ .execute().actionGet();
+
+ refresh();
+
+ // flt query with no field -> OK
+ SearchResponse searchResponse = client().prepareSearch().setQuery(fuzzyLikeThisQuery().likeText("index")).execute().actionGet();
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2L));
+
+ // flt query with string fields
+ searchResponse = client().prepareSearch().setQuery(fuzzyLikeThisQuery("string_value").likeText("index")).execute().actionGet();
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2L));
+
+        // flt query with at least one numeric field -> fails by default
+ assertThrows(client().prepareSearch().setQuery(fuzzyLikeThisQuery("string_value", "int_value").likeText("index")), SearchPhaseExecutionException.class);
+
+        // flt query with at least one numeric field -> fails when failOnUnsupportedField is explicitly set to true
+ assertThrows(client().prepareSearch().setQuery(fuzzyLikeThisQuery("string_value", "int_value").likeText("index").failOnUnsupportedField(true)), SearchPhaseExecutionException.class);
+
+
+        // flt query with at least one numeric field but fail_on_unsupported_field set to false -> the numeric field is skipped
+ searchResponse = client().prepareSearch().setQuery(fuzzyLikeThisQuery("string_value", "int_value").likeText("index").failOnUnsupportedField(false)).execute().actionGet();
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2L));
+
+        // flt field query on a numeric field -> fails by default
+ assertThrows(client().prepareSearch().setQuery(fuzzyLikeThisFieldQuery("int_value").likeText("42")), SearchPhaseExecutionException.class);
+
+        // flt field query on a numeric field -> fails when failOnUnsupportedField is explicitly set to true
+ assertThrows(client().prepareSearch().setQuery(fuzzyLikeThisFieldQuery("int_value").likeText("42").failOnUnsupportedField(true)), SearchPhaseExecutionException.class);
+
+ // flt field query on a numeric field but fail_on_unsupported_field set to false
+ searchResponse = client().prepareSearch().setQuery(fuzzyLikeThisFieldQuery("int_value").likeText("42").failOnUnsupportedField(false)).execute().actionGet();
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0L));
+ }
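+
+    // Summary of the behaviour exercised above: fuzzy_like_this cannot analyze
+    // numeric fields, so any query touching one fails with a
+    // SearchPhaseExecutionException unless failOnUnsupportedField(false) is set,
+    // in which case the unsupported field is silently skipped - hence 2 hits for
+    // the mixed-field query and 0 hits for the purely numeric field query.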
+
+}
diff --git a/src/test/java/org/elasticsearch/gateway/fs/IndexGatewayTests.java b/src/test/java/org/elasticsearch/gateway/fs/IndexGatewayTests.java
new file mode 100644
index 0000000..57b8a28
--- /dev/null
+++ b/src/test/java/org/elasticsearch/gateway/fs/IndexGatewayTests.java
@@ -0,0 +1,384 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.fs;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.status.IndexShardStatus;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.action.admin.indices.status.ShardStatus;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class IndexGatewayTests extends ElasticsearchIntegrationTest {
+
+ private String storeType;
+ private final SetOnce<Settings> settings = new SetOnce<Settings>();
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ if (settings.get() == null) {
+ Builder builder = ImmutableSettings.builder();
+ builder.put("cluster.routing.schedule", "100ms");
+ builder.put("gateway.type", "fs");
+ if (between(0, 5) == 0) {
+ builder.put("gateway.fs.buffer_size", between(1, 100) + "kb");
+ }
+ if (between(0, 5) == 0) {
+ builder.put("gateway.fs.chunk_size", between(1, 100) + "kb");
+ }
+ builder.put("index.number_of_replicas", "1");
+ builder.put("index.number_of_shards", rarely() ? Integer.toString(between(2, 6)) : "1");
+ storeType = rarely() ? "ram" : "fs";
+ builder.put("index.store.type", storeType);
+ settings.set(builder.build());
+ }
+ return settings.get();
+ }
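+
+    // Note: nodeSettings() is memoized through the SetOnce field above, so every
+    // node started by these tests (including restarts) reuses the exact same
+    // randomized gateway, shard count and store type configuration.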
+
+
+ protected boolean isPersistentStorage() {
+ assertNotNull(storeType);
+ return "fs".equals(settings.get().get("index.store.type"));
+ }
+
+ @Test
+ @Slow
+ public void testSnapshotOperations() throws Exception {
+ cluster().startNode(nodeSettings(0));
+
+ // get the environment, so we can clear the work dir when needed
+ Environment environment = cluster().getInstance(Environment.class);
+
+
+        logger.info("Running Cluster Health (waiting for the node to start up properly)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ // Translog tests
+
+ logger.info("Creating index [{}]", "test");
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ // create a mapping
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type1").setSource(mappingSource()).execute().actionGet();
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+
+ // verify that mapping is there
+ ClusterStateResponse clusterState = client().admin().cluster().state(clusterStateRequest()).actionGet();
+ assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());
+
+ // create two and delete the first
+ logger.info("Indexing #1");
+ client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ logger.info("Indexing #2");
+ client().index(Requests.indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet();
+
+ // perform snapshot to the index
+ logger.info("Gateway Snapshot");
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+
+ logger.info("Deleting #1");
+ client().delete(deleteRequest("test").type("type1").id("1")).actionGet();
+
+ // perform snapshot to the index
+ logger.info("Gateway Snapshot");
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+ logger.info("Gateway Snapshot (should be a no op)");
+ // do it again, it should be a no op
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+
+ logger.info("Closing the server");
+ cluster().stopRandomNode();
+ logger.info("Starting the server, should recover from the gateway (only translog should be populated)");
+ cluster().startNode(nodeSettings(0));
+
+        logger.info("Running Cluster Health (wait for the shards to start up)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ // verify that mapping is there
+ clusterState = client().admin().cluster().state(clusterStateRequest()).actionGet();
+ assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());
+
+        logger.info("Getting #1, should not exist");
+ GetResponse getResponse = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ logger.info("Getting #2");
+ getResponse = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
+
+ // Now flush and add some data (so we have index recovery as well)
+ logger.info("Flushing, so we have actual content in the index files (#2 should be in the index)");
+ client().admin().indices().flush(flushRequest("test")).actionGet();
+ logger.info("Indexing #3, so we have something in the translog as well");
+ client().index(Requests.indexRequest("test").type("type1").id("3").source(source("3", "test"))).actionGet();
+
+ logger.info("Gateway Snapshot");
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+ logger.info("Gateway Snapshot (should be a no op)");
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+
+ logger.info("Closing the server");
+ cluster().stopRandomNode();
+ logger.info("Starting the server, should recover from the gateway (both index and translog) and reuse work dir");
+ cluster().startNode(nodeSettings(0));
+
+        logger.info("Running Cluster Health (wait for the shards to start up)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+        logger.info("Getting #1, should not exist");
+ getResponse = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ logger.info("Getting #2 (not from the translog, but from the index)");
+ getResponse = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
+ logger.info("Getting #3 (from the translog)");
+ getResponse = client().get(getRequest("test").type("type1").id("3")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));
+
+ logger.info("Closing the server");
+ cluster().stopRandomNode();
+ logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
+ FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
+ logger.info("Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
+ cluster().startNode(nodeSettings(0));
+
+        logger.info("Running Cluster Health (wait for the shards to start up)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+        logger.info("Getting #1, should not exist");
+ getResponse = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ logger.info("Getting #2 (not from the translog, but from the index)");
+ getResponse = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
+ logger.info("Getting #3 (from the translog)");
+ getResponse = client().get(getRequest("test").type("type1").id("3")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));
+
+
+ logger.info("Flushing, so we have actual content in the index files (#3 should be in the index now as well)");
+ client().admin().indices().flush(flushRequest("test")).actionGet();
+
+ logger.info("Gateway Snapshot");
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+ logger.info("Gateway Snapshot (should be a no op)");
+ client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
+
+ logger.info("Closing the server");
+ cluster().stopRandomNode();
+ logger.info("Starting the server, should recover from the gateway (just from the index, nothing in the translog)");
+ cluster().startNode(nodeSettings(0));
+
+        logger.info("Running Cluster Health (wait for the shards to start up)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+        logger.info("Getting #1, should not exist");
+ getResponse = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ logger.info("Getting #2 (not from the translog, but from the index)");
+ getResponse = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
+ logger.info("Getting #3 (not from the translog, but from the index)");
+ getResponse = client().get(getRequest("test").type("type1").id("3")).actionGet();
+ assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));
+
+ logger.info("Deleting the index");
+ client().admin().indices().delete(deleteIndexRequest("test")).actionGet();
+ }
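+
+    // The restarts above cover the gateway recovery matrix: translog-only
+    // recovery (no flush yet), mixed index + translog recovery (flush followed
+    // by one more indexed doc), a full recovery after wiping the working
+    // directory, and finally an index-only recovery once everything has been
+    // flushed and the translog is empty.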
+
+ @Test
+ @Nightly
+ public void testLoadWithFullRecovery() {
+ testLoad(true);
+ }
+
+ @Test
+ @Nightly
+ public void testLoadWithReuseRecovery() {
+ testLoad(false);
+ }
+
+ private void testLoad(boolean fullRecovery) {
+        logger.info("Running with fullRecovery [{}]", fullRecovery);
+
+ cluster().startNode(nodeSettings(0));
+
+        logger.info("Running Cluster Health (waiting for the node to start up properly)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ // get the environment, so we can clear the work dir when needed
+ Environment environment = cluster().getInstance(Environment.class);
+
+ logger.info("--> creating test index ...");
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+
+        logger.info("Running Cluster Health (wait for the shards to start up)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+
+ logger.info("--> refreshing and checking count");
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+
+ logger.info("--> indexing 1234 docs");
+ for (long i = 0; i < 1234; i++) {
+ client().prepareIndex("test", "type1", Long.toString(i))
+                    .setCreate(true) // make sure we use create, so that a faulty recovery that replays a doc fails instead of silently reindexing it
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
+
+            // snapshot every 11 docs so we get some actions going on in the gateway
+ if ((i % 11) == 0) {
+ client().admin().indices().prepareGatewaySnapshot().execute().actionGet();
+ }
+            // flush every once in a while (every 55 docs), so we get different data
+ if ((i % 55) == 0) {
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ }
+
+ logger.info("--> refreshing and checking count");
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1234l));
+
+
+ logger.info("--> closing the server");
+ cluster().stopRandomNode();
+ if (fullRecovery) {
+ logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
+ FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
+ logger.info("Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
+ }
+
+ cluster().startNode(nodeSettings(0));
+
+        logger.info("--> running Cluster Health (wait for the shards to start up)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("--> done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ logger.info("--> checking count");
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1234l));
+
+ logger.info("--> checking reuse / recovery status");
+ IndicesStatusResponse statusResponse = client().admin().indices().prepareStatus().setRecovery(true).execute().actionGet();
+ for (IndexShardStatus indexShardStatus : statusResponse.getIndex("test")) {
+ for (ShardStatus shardStatus : indexShardStatus) {
+ if (shardStatus.getShardRouting().primary()) {
+ if (fullRecovery || !isPersistentStorage()) {
+ assertThat(shardStatus.getGatewayRecoveryStatus().getReusedIndexSize().bytes(), equalTo(0l));
+ } else {
+ assertThat(shardStatus.getGatewayRecoveryStatus().getReusedIndexSize().bytes(), greaterThan(shardStatus.getGatewayRecoveryStatus().getIndexSize().bytes() - 8196 /* segments file and others */));
+ }
+ }
+ }
+ }
+ }
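+
+    // Recovery expectation above: after a full recovery (or with the
+    // non-persistent "ram" store) the primary reuses nothing from its working
+    // directory, while a reuse recovery on the "fs" store should reuse almost
+    // the entire index, minus a small allowance for the segments file and other
+    // per-commit files.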
+
+ private String mappingSource() {
+ return "{ type1 : { properties : { name : { type : \"string\" } } } }";
+ }
+
+ private String source(String id, String nameValue) {
+ return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
+ }
+
+ @Test
+ @Slow
+ public void testRandom() {
+ testLoad(randomBoolean());
+ }
+
+ @Test
+ @Slow
+ public void testIndexActions() throws Exception {
+ cluster().startNode(nodeSettings(0));
+
+        logger.info("Running Cluster Health (waiting for the node to start up properly)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ assertAcked(client().admin().indices().create(createIndexRequest("test")).actionGet());
+
+ cluster().stopRandomNode();
+ cluster().startNode(nodeSettings(0));
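+
+        // Probe for index existence indirectly: once the index state has been
+        // recovered from the gateway, a second create of "test" must fail with
+        // IndexAlreadyExistsException, which the predicate below treats as success.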
+        assertTrue("index should exist", awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ try {
+ client().admin().indices().create(createIndexRequest("test")).actionGet();
+ return false;
+ } catch (IndexAlreadyExistsException e) {
+ // all is well
+ return true;
+ }
+ }
+ }));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/gateway/local/LocalGatewayIndexStateTests.java b/src/test/java/org/elasticsearch/gateway/local/LocalGatewayIndexStateTests.java
new file mode 100644
index 0000000..5688e02
--- /dev/null
+++ b/src/test/java/org/elasticsearch/gateway/local/LocalGatewayIndexStateTests.java
@@ -0,0 +1,535 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.gateway.Gateway;
+import org.elasticsearch.test.*;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.TestCluster.RestartCallback;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(LocalGatewayIndexStateTests.class);
+
+ @Test
+ public void testMappingMetaDataParsed() throws Exception {
+
+        logger.info("--> starting 1 node");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local"));
+
+ logger.info("--> creating test index, with meta routing");
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true).endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> waiting for yellow status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(5).setWaitForYellowStatus().execute().actionGet();
+ if (health.isTimedOut()) {
+ ClusterStateResponse response = client().admin().cluster().prepareState().execute().actionGet();
+ System.out.println("" + response);
+ }
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify meta _routing required exists");
+ MappingMetaData mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").mapping("type1");
+ assertThat(mappingMd.routing().required(), equalTo(true));
+
+ logger.info("--> restarting nodes...");
+ cluster().fullRestart();
+
+ logger.info("--> waiting for yellow status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(5).setWaitForYellowStatus().execute().actionGet();
+ if (health.isTimedOut()) {
+ ClusterStateResponse response = client().admin().cluster().prepareState().execute().actionGet();
+ System.out.println("" + response);
+ }
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify meta _routing required exists");
+ mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").mapping("type1");
+ assertThat(mappingMd.routing().required(), equalTo(true));
+ }
+
+ @Test
+ public void testSimpleOpenClose() throws Exception {
+
+ logger.info("--> starting 2 nodes");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build());
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build());
+
+ logger.info("--> creating test index");
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(2));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet();
+
+ logger.info("--> closing test index...");
+ client().admin().indices().prepareClose("test").execute().actionGet();
+
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> verifying that the state is green");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("--> trying to index into a closed index ...");
+ try {
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimeout("1s").execute().actionGet();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
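+
+        // A closed index carries a cluster-level write block, so the index
+        // request above is rejected with a ClusterBlockException rather than
+        // being applied.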
+
+ logger.info("--> creating another index (test2) by indexing into it");
+ client().prepareIndex("test2", "type1", "1").setSource("field1", "value1").execute().actionGet();
+ logger.info("--> verifying that the state is green");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("--> opening the first index again...");
+ client().admin().indices().prepareOpen("test").execute().actionGet();
+
+ logger.info("--> verifying that the state is green");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(2));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> trying to get the indexed document on the first index");
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+
+ logger.info("--> closing test index...");
+ client().admin().indices().prepareClose("test").execute().actionGet();
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> restarting nodes...");
+ cluster().fullRestart();
+ logger.info("--> waiting for two nodes and green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> trying to index into a closed index ...");
+ try {
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimeout("1s").execute().actionGet();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+
+ logger.info("--> opening index...");
+ client().admin().indices().prepareOpen("test").execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(2));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> trying to get the indexed document on the first round (before close and shutdown)");
+ getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1").execute().actionGet();
+ }
+
+ @Test
+ public void testJustMasterNode() throws Exception {
+ logger.info("--> cleaning nodes");
+
+        logger.info("--> starting 1 non-data master node");
+ cluster().startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build());
+
+ logger.info("--> create an index");
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ logger.info("--> closing master node");
+ cluster().closeNonSharedNodes(false);
+
+        logger.info("--> starting 1 non-data master node again");
+ cluster().startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build());
+
+ logger.info("--> waiting for test index to be created");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setIndices("test").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify we have an index");
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().setIndices("test").execute().actionGet();
+ assertThat(clusterStateResponse.getState().metaData().hasIndex("test"), equalTo(true));
+ }
+
+ @Test
+ public void testJustMasterNodeAndJustDataNode() throws Exception {
+ logger.info("--> cleaning nodes");
+
+        logger.info("--> starting 1 non-data master node and 1 non-master data node");
+ cluster().startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build());
+ cluster().startNode(settingsBuilder().put("node.master", false).put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build());
+
+ logger.info("--> create an index");
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ logger.info("--> waiting for test index to be created");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setIndices("test").setWaitForYellowStatus().execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ client().prepareIndex("test", "type1").setSource("field1", "value1").setTimeout("100ms").execute().actionGet();
+ }
+
+ @Test
+ public void testTwoNodesSingleDoc() throws Exception {
+ logger.info("--> cleaning nodes");
+
+ logger.info("--> starting 2 nodes");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build());
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build());
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> closing test index...");
+ client().admin().indices().prepareClose("test").execute().actionGet();
+
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> opening the index...");
+ client().admin().indices().prepareOpen("test").execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void testDanglingIndicesAutoImportYes() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("gateway.type", "local").put("gateway.local.auto_import_dangled", "yes")
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)
+ .build();
+ logger.info("--> starting two nodes");
+ final String node_1 = cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+
+ logger.info("--> restarting the nodes");
+ final Gateway gateway1 = cluster().getInstance(Gateway.class, node_1);
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ if (node_1.equals(nodeName)) {
+ logger.info("--> deleting the data for the first node");
+ gateway1.reset();
+ }
+ return null;
+ }
+ });
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+        // spin a bit, waiting for the index to exist
+ long time = System.currentTimeMillis();
+ while ((System.currentTimeMillis() - time) < TimeValue.timeValueSeconds(10).millis()) {
+ if (client().admin().indices().prepareExists("test").execute().actionGet().isExists()) {
+ break;
+ }
+ }
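+
+        // The loop above is a bounded busy-wait: it polls the indices-exists API
+        // for up to 10 seconds while the dangling index import propagates, and
+        // the assertion below then verifies the result.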
+
+ logger.info("--> verify that the dangling index exists");
+ assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true));
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify the doc is there");
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ @Test
+ public void testDanglingIndicesAutoImportClose() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("gateway.type", "local").put("gateway.local.auto_import_dangled", "closed")
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)
+ .build();
+
+
+ logger.info("--> starting two nodes");
+ final String node_1 = cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+
+ logger.info("--> restarting the nodes");
+ final Gateway gateway1 = cluster().getInstance(Gateway.class, node_1);
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ if (node_1.equals(nodeName)) {
+ logger.info("--> deleting the data for the first node");
+ gateway1.reset();
+ }
+ return null;
+ }
+ });
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+        // spin a bit, waiting for the index to exist
+ long time = System.currentTimeMillis();
+ while ((System.currentTimeMillis() - time) < TimeValue.timeValueSeconds(10).millis()) {
+ if (client().admin().indices().prepareExists("test").execute().actionGet().isExists()) {
+ break;
+ }
+ }
+
+ logger.info("--> verify that the dangling index exists");
+ assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true));
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify the index state is closed");
+ assertThat(client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ logger.info("--> open the index");
+ client().admin().indices().prepareOpen("test").execute().actionGet();
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify the doc is there");
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ @Test
+ public void testDanglingIndicesNoAutoImport() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("gateway.type", "local").put("gateway.local.auto_import_dangled", "no")
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)
+ .build();
+ logger.info("--> starting two nodes");
+ final String node_1 = cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+
+ logger.info("--> restarting the nodes");
+ final Gateway gateway1 = cluster().getInstance(Gateway.class, node_1);
+ cluster().fullRestart(new RestartCallback() {
+
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ if (node_1.equals(nodeName)) {
+ logger.info("--> deleting the data for the first node");
+ gateway1.reset();
+ }
+ return null;
+ }
+ });
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+        // we need to wait for the dangling-index allocation to kick in (even though in this case it's disabled)
+        // just to make sure
+ Thread.sleep(500);
+
+        logger.info("--> verify that the dangling index does not exist");
+ assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(false));
+
+        logger.info("--> restart the nodes, but make sure we do recovery only after we have 2 nodes in the cluster");
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return settingsBuilder().put("gateway.recover_after_nodes", 2).build();
+ }
+ });
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+        logger.info("--> verify that the dangling index does exist now!");
+ assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true));
+ logger.info("--> verify the doc is there");
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ @Test
+ public void testDanglingIndicesNoAutoImportStillDanglingAndCreatingSameIndex() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("gateway.type", "local").put("gateway.local.auto_import_dangled", "no")
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)
+ .build();
+
+ logger.info("--> starting two nodes");
+ final String node_1 = cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> restarting the nodes");
+ final Gateway gateway1 = cluster().getInstance(Gateway.class, node_1);
+ cluster().fullRestart(new RestartCallback() {
+
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ if (node_1.equals(nodeName)) {
+ logger.info("--> deleting the data for the first node");
+ gateway1.reset();
+ }
+ return null;
+ }
+ });
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+        logger.info("--> verify that the dangling index does not exist");
+ assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(false));
+
+ logger.info("--> close the first node, so we remain with the second that has the dangling index");
+ cluster().stopRandomNode(TestCluster.nameFilter(node_1));
+
+ logger.info("--> index a different doc");
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2").setRefresh(true).execute().actionGet();
+
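+        // Indexing into "test" auto-creates a brand new index under the same name;
+        // the dangling copy is never imported, so doc 1 is gone while the freshly
+        // indexed doc 2 is visible.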
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "2").execute().actionGet().isExists(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/gateway/local/QuorumLocalGatewayTests.java b/src/test/java/org/elasticsearch/gateway/local/QuorumLocalGatewayTests.java
new file mode 100644
index 0000000..2eb286c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/gateway/local/QuorumLocalGatewayTests.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.TestCluster.RestartCallback;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@ClusterScope(numNodes=0, scope=Scope.TEST)
+public class QuorumLocalGatewayTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ @Slow
+ public void testChangeInitialShardsRecovery() throws Exception {
+ logger.info("--> starting 3 nodes");
+ final String[] nodes = new String[3];
+ nodes[0] = cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 2).build());
+ nodes[1] = cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 2).build());
+ nodes[2] = cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 2).build());
+
+ logger.info("--> indexing...");
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
+        // We don't check for failures in the flush response: if we did, we might get the following:
+ // FlushNotAllowedEngineException[[test][1] recovery is in progress, flush [COMMIT_TRANSLOG] is not allowed]
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get();
+ assertNoFailures(client().admin().indices().prepareRefresh().execute().get());
+
+        logger.info("--> running cluster_health (wait for the shards to start up)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(6)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2l);
+ }
+
+ final String nodeToRemove = nodes[between(0,2)];
+        logger.info("--> restarting 1 node -- killing 2");
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return settingsBuilder().put("gateway.type", "local").build();
+ }
+
+ @Override
+ public boolean doRestart(String nodeName) {
+ return nodeToRemove.equals(nodeName);
+ }
+ });
+ if (randomBoolean()) {
+            Thread.sleep(between(1, 400)); // wait a bit and give it a chance to try to allocate
+ }
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForNodes("1")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.RED)); // nothing allocated yet
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ ClusterStateResponse clusterStateResponse = cluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get();
+ return clusterStateResponse.getState() != null && clusterStateResponse.getState().routingTable().index("test") != null;
+            }}), equalTo(true)); // wait until we get a cluster state - it could be null if we are quick enough.
+ final ClusterStateResponse clusterStateResponse = cluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get();
+ assertThat(clusterStateResponse.getState(), notNullValue());
+ assertThat(clusterStateResponse.getState().routingTable().index("test"), notNullValue());
+ assertThat(clusterStateResponse.getState().routingTable().index("test").allPrimaryShardsActive(), is(false));
+        logger.info("--> change the recovery.initial_shards setting, and make sure it's recovered");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("recovery.initial_shards", 1)).get();
+
+        logger.info("--> running cluster_health (wait for the shards to start up), 2 shards since we only have 1 node");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(2)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2l);
+ }
+ }
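+
+    // Note: with 2 replicas, primary allocation normally waits for a quorum of
+    // shard copies to be found. Lowering recovery.initial_shards to 1 lets the
+    // single surviving node promote its local copies, which is why the cluster
+    // moves from red to yellow after the settings update above.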
+
+ @Test
+ @Slow
+ public void testQuorumRecovery() throws Exception {
+
+ logger.info("--> starting 3 nodes");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 2).build());
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 2).build());
+ cluster().startNode(settingsBuilder().put("gateway.type", "local").put("index.number_of_shards", 2).put("index.number_of_replicas", 2).build());
+ // we are shutting down nodes - make sure we don't end up with two separate clusters when testing over the network
+ setMinimumMasterNodes(2);
+
+ logger.info("--> indexing...");
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
+ // We don't check for failures in the flush response: if we did, we might get the following:
+ // FlushNotAllowedEngineException[[test][1] recovery is in progress, flush [COMMIT_TRANSLOG] is not allowed]
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get();
+ assertNoFailures(client().admin().indices().prepareRefresh().get());
+
+ logger.info("--> running cluster_health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(6)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2L);
+ }
+ logger.info("--> restart all nodes");
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return null;
+ }
+
+ @Override
+ public void doAfterNodes(int numNodes, final Client activeClient) throws Exception {
+ if (numNodes == 1) {
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ logger.info("--> running cluster_health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = activeClient.admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2").waitForActiveShards(4)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ return (!clusterHealth.isTimedOut()) && clusterHealth.getStatus() == ClusterHealthStatus.YELLOW;
+ }
+ }, 30, TimeUnit.SECONDS), equalTo(true));
+ logger.info("--> one node is closed -- index 1 document into the remaining nodes");
+ activeClient.prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).get();
+ assertNoFailures(activeClient.admin().indices().prepareRefresh().get());
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(activeClient.prepareCount().setQuery(matchAllQuery()).get(), 3L);
+ }
+ }
+ }
+
+ });
+ logger.info("--> all nodes are started back, verifying we got the latest version");
+ logger.info("--> running cluster_health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(6)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 3L);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/gateway/local/SimpleRecoveryLocalGatewayTests.java b/src/test/java/org/elasticsearch/gateway/local/SimpleRecoveryLocalGatewayTests.java
new file mode 100644
index 0000000..4be01d0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/gateway/local/SimpleRecoveryLocalGatewayTests.java
@@ -0,0 +1,428 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.local;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.status.IndexShardStatus;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.action.admin.indices.status.ShardStatus;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.TestCluster.RestartCallback;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@ClusterScope(numNodes = 0, scope = Scope.TEST)
+public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTest {
+
+
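+ // every node in these tests uses the local gateway, so index data survives a full cluster restart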
+ private ImmutableSettings.Builder settingsBuilder() {
+ return ImmutableSettings.settingsBuilder().put("gateway.type", "local");
+ }
+
+ @Test
+ @Slow
+ public void testX() throws Exception {
+
+ cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).build());
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("appAccountIds").field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "10990239").setSource(jsonBuilder().startObject()
+ .field("_id", "10990239")
+ .startArray("appAccountIds").value(14).value(179).endArray().endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "10990473").setSource(jsonBuilder().startObject()
+ .field("_id", "10990473")
+ .startArray("appAccountIds").value(14).endArray().endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "10990513").setSource(jsonBuilder().startObject()
+ .field("_id", "10990513")
+ .startArray("appAccountIds").value(14).value(179).endArray().endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "10990695").setSource(jsonBuilder().startObject()
+ .field("_id", "10990695")
+ .startArray("appAccountIds").value(14).endArray().endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "11026351").setSource(jsonBuilder().startObject()
+ .field("_id", "11026351")
+ .startArray("appAccountIds").value(14).endArray().endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertHitCount(client().prepareCount().setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
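+ // restart the node and verify the data is recovered from the local gateway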
+ cluster().fullRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertHitCount(client().prepareCount().setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
+
+ cluster().fullRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertHitCount(client().prepareCount().setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
+ }
+
+ @Test
+ @Slow
+ public void testSingleNodeNoFlush() throws Exception {
+
+ cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).build());
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("field").field("type", "string").endObject().startObject("num").field("type", "integer").endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("_id", "1").field("field", "value1").startArray("num").value(14).value(179).endArray().endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("_id", "2").field("field", "value2").startArray("num").value(14).endArray().endObject()).execute().actionGet();
+ }
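+ // intentionally no flush: all operations live only in the translog, so recovery has to replay it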
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ assertHitCount(client().prepareCount().setQuery(termQuery("field", "value1")).execute().actionGet(), 1);
+ assertHitCount(client().prepareCount().setQuery(termQuery("field", "value2")).execute().actionGet(), 1);
+ assertHitCount(client().prepareCount().setQuery(termQuery("num", 179)).execute().actionGet(), 1);
+ }
+
+ cluster().fullRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ assertHitCount(client().prepareCount().setQuery(termQuery("field", "value1")).execute().actionGet(), 1);
+ assertHitCount(client().prepareCount().setQuery(termQuery("field", "value2")).execute().actionGet(), 1);
+ assertHitCount(client().prepareCount().setQuery(termQuery("num", 179)).execute().actionGet(), 1);
+ }
+
+ cluster().fullRestart();
+
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ assertHitCount(client().prepareCount().setQuery(termQuery("field", "value1")).execute().actionGet(), 1);
+ assertHitCount(client().prepareCount().setQuery(termQuery("field", "value2")).execute().actionGet(), 1);
+ assertHitCount(client().prepareCount().setQuery(termQuery("num", 179)).execute().actionGet(), 1);
+ }
+ }
+
+
+ @Test
+ @Slow
+ public void testSingleNodeWithFlush() throws Exception {
+
+ cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).build());
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+
+ cluster().fullRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ }
+
+ cluster().fullRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ }
+ }
+
+ @Test
+ @Slow
+ public void testTwoNodeFirstNodeCleared() throws Exception {
+
+ final String firstNode = cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).build());
+ cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).build());
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(2)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ }
+
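+ // restart both nodes and wipe the first node's data - it must recover its shard copy from the second node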
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return settingsBuilder().put("gateway.recover_after_nodes", 2).build();
+ }
+
+ @Override
+ public boolean clearData(String nodeName) {
+ return firstNode.equals(nodeName);
+ }
+
+ });
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(2)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ }
+ }
+
+ @Test
+ @Slow
+ public void testLatestVersionLoaded() throws Exception {
+ // start two clean nodes
+
+ cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).put("gateway.recover_after_nodes", 2).build());
+ cluster().startNode(settingsBuilder().put("index.number_of_shards", 1).put("gateway.recover_after_nodes", 2).build());
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("--> running cluster_health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(2)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+ }
+
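+ // capture the cluster metadata uuid so we can assert it survives the full restart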
+ String metaDataUuid = client().admin().cluster().prepareState().execute().get().getState().getMetaData().uuid();
+ assertThat(metaDataUuid, not(equalTo("_na_")));
+
+ logger.info("--> closing first node, and indexing more data to the second node");
+ cluster().fullRestart(new RestartCallback() {
+
+ @Override
+ public void doAfterNodes(int numNodes, Client client) throws Exception {
+ if (numNodes == 1) {
+ logger.info("--> one node is closed - start indexing data into the second one");
+ client.prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).execute().actionGet();
+ client.admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client.prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 3);
+ }
+
+ logger.info("--> add some metadata, additional type and template");
+ client.admin().indices().preparePutMapping("test").setType("type2")
+ .setSource(jsonBuilder().startObject().startObject("type2").startObject("_source").field("enabled", false).endObject().endObject().endObject())
+ .execute().actionGet();
+ client.admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client.admin().indices().prepareAliases().addAlias("test", "test_alias", FilterBuilders.termFilter("field", "value")).execute().actionGet();
+ logger.info("--> starting two nodes back, verifying we got the latest version");
+ }
+
+ }
+
+ });
+
+ logger.info("--> running cluster_health (wait for the shards to startup)");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(2)).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ assertThat(client().admin().cluster().prepareState().execute().get().getState().getMetaData().uuid(), equalTo(metaDataUuid));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 3);
+ }
+
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.metaData().index("test").mapping("type2"), notNullValue());
+ assertThat(state.metaData().templates().get("template_1").template(), equalTo("te*"));
+ assertThat(state.metaData().index("test").aliases().get("test_alias"), notNullValue());
+ assertThat(state.metaData().index("test").aliases().get("test_alias").filter(), notNullValue());
+ }
+
+ @Test
+ @Slow
+ public void testReusePeerRecovery() throws Exception {
+
+
+ ImmutableSettings.Builder settings = settingsBuilder()
+ .put("action.admin.cluster.node.shutdown.delay", "10ms")
+ .put("gateway.recover_after_nodes", 4)
+
+ .put(BalancedShardsAllocator.SETTING_THRESHOLD, 1.1f); // use less agressive settings
+
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ logger.info("--> indexing docs");
+ for (int i = 0; i < 1000; i++) {
+ client().prepareIndex("test", "type").setSource("field", "value").execute().actionGet();
+ if ((i % 200) == 0) {
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ }
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForRelocatingShards(0)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("--> shutting down the nodes");
+ // Disable allocations while we are closing nodes
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)).execute().actionGet();
+ cluster().fullRestart();
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(10)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("--> shutting down the nodes");
+ // Disable allocations while we are closing nodes
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)).execute().actionGet();
+ cluster().fullRestart();
+
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForActiveShards(10)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
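+ // each replica should have reused more of its existing index files than it copied over during peer recovery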
+ IndicesStatusResponse statusResponse = client().admin().indices().prepareStatus("test").setRecovery(true).execute().actionGet();
+ for (IndexShardStatus indexShardStatus : statusResponse.getIndex("test")) {
+ for (ShardStatus shardStatus : indexShardStatus) {
+ if (!shardStatus.getShardRouting().primary()) {
+ logger.info("--> shard {}, recovered {}, reuse {}", shardStatus.getShardId(), shardStatus.getPeerRecoveryStatus().getRecoveredIndexSize(), shardStatus.getPeerRecoveryStatus().getReusedIndexSize());
+ assertThat(shardStatus.getPeerRecoveryStatus().getRecoveredIndexSize().bytes(), greaterThan(0L));
+ assertThat(shardStatus.getPeerRecoveryStatus().getReusedIndexSize().bytes(), greaterThan(0L));
+ assertThat(shardStatus.getPeerRecoveryStatus().getReusedIndexSize().bytes(), greaterThan(shardStatus.getPeerRecoveryStatus().getRecoveredIndexSize().bytes()));
+ }
+ }
+ }
+ }
+
+ @Test
+ @Slow
+ public void testRecoveryDifferentNodeOrderStartup() throws Exception {
+ // we need different data paths so the second node starts completely fresh
+
+ final String node_1 = cluster().startNode(settingsBuilder().put("path.data", "data/data1").build());
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value").execute().actionGet();
+
+ cluster().startNode(settingsBuilder().put("path.data", "data/data2").build());
+
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
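+ // bring back only the second node; node_1 stays down, so the second node alone must restore the index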
+ cluster().fullRestart(new RestartCallback() {
+
+ @Override
+ public boolean doRestart(String nodeName) {
+ return !node_1.equals(nodeName);
+ }
+ });
+
+
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true));
+ assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 1);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/gateway/none/RecoverAfterNodesTests.java b/src/test/java/org/elasticsearch/gateway/none/RecoverAfterNodesTests.java
new file mode 100644
index 0000000..351ed19
--- /dev/null
+++ b/src/test/java/org/elasticsearch/gateway/none/RecoverAfterNodesTests.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway.none;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.gateway.GatewayService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class RecoverAfterNodesTests extends ElasticsearchIntegrationTest {
+
+ private final static TimeValue BLOCK_WAIT_TIMEOUT = TimeValue.timeValueSeconds(1);
+
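+ // poll the node-local cluster state until the global METADATA blocks clear or the timeout elapses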
+ public ImmutableSet<ClusterBlock> waitForNoBlocksOnNode(TimeValue timeout, Client nodeClient) throws InterruptedException {
+ long start = System.currentTimeMillis();
+ ImmutableSet<ClusterBlock> blocks;
+ do {
+ blocks = nodeClient.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA);
+ } while (!blocks.isEmpty() && (System.currentTimeMillis() - start) < timeout.millis());
+ return blocks;
+ }
+
+ public Client startNode(Settings.Builder settings) {
+ String name = cluster().startNode(settings);
+ return cluster().client(name);
+ }
+
+ @Test
+ public void testRecoverAfterNodes() throws Exception {
+ logger.info("--> start node (1)");
+ Client clientNode1 = startNode(settingsBuilder().put("gateway.recover_after_nodes", 3));
+ assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start node (2)");
+ Client clientNode2 = startNode(settingsBuilder().put("gateway.recover_after_nodes", 3));
+ Thread.sleep(BLOCK_WAIT_TIMEOUT.millis());
+ assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(clientNode2.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start node (3)");
+ Client clientNode3 = startNode(settingsBuilder().put("gateway.recover_after_nodes", 3));
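+ // the third node satisfies recover_after_nodes=3, so the not-recovered block should now clear on all nodes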
+
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode1).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode2).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode3).isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testRecoverAfterMasterNodes() throws Exception {
+ logger.info("--> start master_node (1)");
+ Client master1 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", false).put("node.master", true));
+ assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start data_node (1)");
+ Client data1 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", true).put("node.master", false));
+ assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start data_node (2)");
+ Client data2 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", true).put("node.master", false));
+ assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(data2.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start master_node (2)");
+ Client master2 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", false).put("node.master", true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master1).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master2).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data2).isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testRecoverAfterDataNodes() throws Exception {
+ logger.info("--> start master_node (1)");
+ Client master1 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", false).put("node.master", true));
+ assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start data_node (1)");
+ Client data1 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", true).put("node.master", false));
+ assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start master_node (2)");
+ Client master2 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", false).put("node.master", true));
+ assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+ assertThat(master2.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+ .getState().blocks().global(ClusterBlockLevel.METADATA),
+ hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+ logger.info("--> start data_node (2)");
+ Client data2 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", true).put("node.master", false));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master1).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master2).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true));
+ assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data2).isEmpty(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/get/GetActionTests.java b/src/test/java/org/elasticsearch/get/GetActionTests.java
new file mode 100644
index 0000000..e01f533
--- /dev/null
+++ b/src/test/java/org/elasticsearch/get/GetActionTests.java
@@ -0,0 +1,870 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.get;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.common.Base64;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+public class GetActionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleGetTests() {
+
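+ // refresh is disabled, so documents are only visible via realtime get until an explicit refresh or flush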
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)).execute().actionGet();
+
+ ensureGreen();
+
+ GetResponse response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+
+ logger.info("--> index doc 1");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get();
+
+ logger.info("--> realtime get 1");
+ response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
+
+ logger.info("--> realtime get 1 (no source, implicit)");
+ response = client().prepareGet("test", "type1", "1").setFields(Strings.EMPTY_ARRAY).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getFields().size(), equalTo(0));
+ assertThat(response.getSourceAsBytes(), nullValue());
+
+ logger.info("--> realtime get 1 (no source, explicit)");
+ response = client().prepareGet("test", "type1", "1").setFetchSource(false).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getFields().size(), equalTo(0));
+ assertThat(response.getSourceAsBytes(), nullValue());
+
+ logger.info("--> realtime get 1 (no type)");
+ response = client().prepareGet("test", null, "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
+
+ logger.info("--> non realtime get 1");
+ response = client().prepareGet("test", "type1", "1").setRealtime(false).execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+
+ logger.info("--> realtime fetch of field (requires fetching parsing source)");
+ response = client().prepareGet("test", "type1", "1").setFields("field1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsBytes(), nullValue());
+ assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
+ assertThat(response.getField("field2"), nullValue());
+
+ logger.info("--> realtime fetch of field & source (requires fetching parsing source)");
+ response = client().prepareGet("test", "type1", "1").setFields("field1").setFetchSource("field1", null).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap(), hasKey("field1"));
+ assertThat(response.getSourceAsMap(), not(hasKey("field2")));
+ assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
+ assertThat(response.getField("field2"), nullValue());
+
+ logger.info("--> flush the index, so we load it from it");
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ logger.info("--> realtime get 1 (loaded from index)");
+ response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
+
+ logger.info("--> non realtime get 1 (loaded from index)");
+ response = client().prepareGet("test", "type1", "1").setRealtime(false).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
+
+ logger.info("--> realtime fetch of field (loaded from index)");
+ response = client().prepareGet("test", "type1", "1").setFields("field1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsBytes(), nullValue());
+ assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
+ assertThat(response.getField("field2"), nullValue());
+
+ logger.info("--> realtime fetch of field & source (loaded from index)");
+ response = client().prepareGet("test", "type1", "1").setFields("field1").setFetchSource(true).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsBytes(), not(nullValue()));
+ assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
+ assertThat(response.getField("field2"), nullValue());
+
+ logger.info("--> update doc 1");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").execute().actionGet();
+
+ logger.info("--> realtime get 1");
+ response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1_1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2_1"));
+
+ logger.info("--> update doc 1 again");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_2", "field2", "value2_2").execute().actionGet();
+
+ response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1_2"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2_2"));
+
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "1").execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+
+ response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+ }
+
+ @Test
+ public void simpleMultiGetTests() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore - the index may not exist yet
+ }
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)).execute().actionGet();
+
+ ensureGreen();
+
+ MultiGetResponse response = client().prepareMultiGet().add("test", "type1", "1").execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(1));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false));
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+
+ response = client().prepareMultiGet()
+ .add("test", "type1", "1")
+ .add("test", "type1", "15")
+ .add("test", "type1", "3")
+ .add("test", "type1", "9")
+ .add("test", "type1", "11")
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(5));
+ assertThat(response.getResponses()[0].getId(), equalTo("1"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[1].getId(), equalTo("15"));
+ assertThat(response.getResponses()[1].getResponse().isExists(), equalTo(false));
+ assertThat(response.getResponses()[2].getId(), equalTo("3"));
+ assertThat(response.getResponses()[2].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[3].getId(), equalTo("9"));
+ assertThat(response.getResponses()[3].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[4].getId(), equalTo("11"));
+ assertThat(response.getResponses()[4].getResponse().isExists(), equalTo(false));
+
+ // multi get with specific field
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "1").fields("field"))
+ .add(new MultiGetRequest.Item("test", "type1", "3").fields("field"))
+ .execute().actionGet();
+
+ assertThat(response.getResponses().length, equalTo(2));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsBytes(), nullValue());
+ assertThat(response.getResponses()[0].getResponse().getField("field").getValues().get(0).toString(), equalTo("value1"));
+ }
+
+ @Test
+ public void realtimeGetWithCompress() throws Exception {
+
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("_source").field("compress", true).endObject().endObject().endObject())
+ .execute().actionGet();
+
+ ensureGreen();
+
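+ // build a large source value so the _source compression has something to compress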
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < 10000; i++) {
+ sb.append((char) i);
+ }
+ String fieldValue = sb.toString();
+ client().prepareIndex("test", "type", "1").setSource("field", fieldValue).execute().actionGet();
+
+ // realtime get
+ GetResponse getResponse = client().prepareGet("test", "type", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo(fieldValue));
+ }
+
+ @Test
+ public void getFieldsWithDifferentTypes() throws Exception {
+
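+ // type1 keeps _source (field values are parsed out of it), while type2 disables _source but stores every field (values come from stored fields)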
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("_source").field("enabled", true).endObject().endObject().endObject())
+ .addMapping("type2", jsonBuilder().startObject().startObject("type2")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("properties")
+ .startObject("str").field("type", "string").field("store", "yes").endObject()
+ .startObject("strs").field("type", "string").field("store", "yes").endObject()
+ .startObject("int").field("type", "integer").field("store", "yes").endObject()
+ .startObject("ints").field("type", "integer").field("store", "yes").endObject()
+ .startObject("date").field("type", "date").field("store", "yes").endObject()
+ .startObject("binary").field("type", "binary").field("store", "yes").endObject()
+ .endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(
+ jsonBuilder().startObject()
+ .field("str", "test")
+ .field("strs", new String[]{"A", "B", "C"})
+ .field("int", 42)
+ .field("ints", new int[]{1, 2, 3, 4})
+ .field("date", "2012-11-13T15:26:14.000Z")
+ .field("binary", Base64.encodeBytes(new byte[]{1, 2, 3}))
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type2", "1").setSource(
+ jsonBuilder().startObject()
+ .field("str", "test")
+ .field("strs", new String[]{"A", "B", "C"})
+ .field("int", 42)
+ .field("ints", new int[]{1, 2, 3, 4})
+ .field("date", "2012-11-13T15:26:14.000Z")
+ .field("binary", Base64.encodeBytes(new byte[]{1, 2, 3}))
+ .endObject()).execute().actionGet();
+
+ // realtime get with stored source
+ logger.info("--> realtime get (from source)");
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").setFields("str", "strs", "int", "ints", "date", "binary").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat((String) getResponse.getField("str").getValue(), equalTo("test"));
+ assertThat(getResponse.getField("strs").getValues(), contains((Object) "A", "B", "C"));
+ assertThat((Long) getResponse.getField("int").getValue(), equalTo(42L));
+ assertThat(getResponse.getField("ints").getValues(), contains((Object) 1L, 2L, 3L, 4L));
+ assertThat((String) getResponse.getField("date").getValue(), equalTo("2012-11-13T15:26:14.000Z"));
+ assertThat(getResponse.getField("binary").getValue(), instanceOf(String.class)); // its a String..., not binary mapped
+
+ logger.info("--> realtime get (from stored fields)");
+ getResponse = client().prepareGet("test", "type2", "1").setFields("str", "strs", "int", "ints", "date", "binary").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat((String) getResponse.getField("str").getValue(), equalTo("test"));
+ assertThat(getResponse.getField("strs").getValues(), contains((Object) "A", "B", "C"));
+ assertThat((Integer) getResponse.getField("int").getValue(), equalTo(42));
+ assertThat(getResponse.getField("ints").getValues(), contains((Object) 1, 2, 3, 4));
+ assertThat((String) getResponse.getField("date").getValue(), equalTo("2012-11-13T15:26:14.000Z"));
+ assertThat((BytesReference) getResponse.getField("binary").getValue(), equalTo((BytesReference) new BytesArray(new byte[]{1, 2, 3})));
+
+ logger.info("--> flush the index, so we load it from it");
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ logger.info("--> non realtime get (from source)");
+ getResponse = client().prepareGet("test", "type1", "1").setFields("str", "strs", "int", "ints", "date", "binary").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat((String) getResponse.getField("str").getValue(), equalTo("test"));
+ assertThat(getResponse.getField("strs").getValues(), contains((Object) "A", "B", "C"));
+ assertThat((Long) getResponse.getField("int").getValue(), equalTo(42L));
+ assertThat(getResponse.getField("ints").getValues(), contains((Object) 1L, 2L, 3L, 4L));
+ assertThat((String) getResponse.getField("date").getValue(), equalTo("2012-11-13T15:26:14.000Z"));
+ assertThat(getResponse.getField("binary").getValue(), instanceOf(String.class)); // its a String..., not binary mapped
+
+ logger.info("--> non realtime get (from stored fields)");
+ getResponse = client().prepareGet("test", "type2", "1").setFields("str", "strs", "int", "ints", "date", "binary").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat((String) getResponse.getField("str").getValue(), equalTo("test"));
+ assertThat(getResponse.getField("strs").getValues(), contains((Object) "A", "B", "C"));
+ assertThat((Integer) getResponse.getField("int").getValue(), equalTo(42));
+ assertThat(getResponse.getField("ints").getValues(), contains((Object) 1, 2, 3, 4));
+ assertThat((String) getResponse.getField("date").getValue(), equalTo("2012-11-13T15:26:14.000Z"));
+ assertThat((BytesReference) getResponse.getField("binary").getValue(), equalTo((BytesReference) new BytesArray(new byte[]{1, 2, 3})));
+ }
+
+ @Test
+ public void testGetDocWithMultivaluedFields() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore - the index may not exist yet
+ }
+ String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("field").field("type", "string").field("store", "yes").endObject()
+ .endObject()
+ .endObject().endObject().string();
+ String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type2")
+ .startObject("properties")
+ .startObject("field").field("type", "string").field("store", "yes").endObject()
+ .endObject()
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", mapping1)
+ .addMapping("type2", mapping2)
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .execute().actionGet();
+
+ ensureGreen();
+
+ GetResponse response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+ response = client().prepareGet("test", "type2", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject().field("field", "1", "2").endObject())
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type2", "1")
+ .setSource(jsonBuilder().startObject().field("field", "1", "2").endObject())
+ .execute().actionGet();
+
+ response = client().prepareGet("test", "type1", "1")
+ .setFields("field")
+ .execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getType(), equalTo("type1"));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().get("field").getValues().size(), equalTo(2));
+ assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1"));
+ assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
+
+
+ response = client().prepareGet("test", "type2", "1")
+ .setFields("field")
+ .execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getType(), equalTo("type2"));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().get("field").getValues().size(), equalTo(2));
+ assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1"));
+ assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
+
+ // Now test values being fetched from stored fields.
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareGet("test", "type1", "1")
+ .setFields("field")
+ .execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().get("field").getValues().size(), equalTo(2));
+ assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1"));
+ assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
+
+
+ response = client().prepareGet("test", "type2", "1")
+ .setFields("field")
+ .execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().get("field").getValues().size(), equalTo(2));
+ assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1"));
+ assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
+ }
+
+ @Test
+ public void testThatGetFromTranslogShouldWorkWithExclude() throws Exception {
+ String index = "test";
+ String type = "type1";
+
+ String mapping = jsonBuilder()
+ .startObject()
+ .startObject(type)
+ .startObject("_source")
+ .array("excludes", "excluded")
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ client().admin().indices().prepareCreate(index)
+ .addMapping(type, mapping)
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .execute().actionGet();
+
+ client().prepareIndex(index, type, "1")
+ .setSource(jsonBuilder().startObject().field("field", "1", "2").field("excluded", "should not be seen").endObject())
+ .execute().actionGet();
+
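+ // compare a translog-backed get (before flush) with an index-backed get (after flush); both must apply the source excludes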
+ GetResponse responseBeforeFlush = client().prepareGet(index, type, "1").execute().actionGet();
+ client().admin().indices().prepareFlush(index).execute().actionGet();
+ GetResponse responseAfterFlush = client().prepareGet(index, type, "1").execute().actionGet();
+
+ assertThat(responseBeforeFlush.isExists(), is(true));
+ assertThat(responseAfterFlush.isExists(), is(true));
+ assertThat(responseBeforeFlush.getSourceAsMap(), hasKey("field"));
+ assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("excluded")));
+ assertThat(responseBeforeFlush.getSourceAsString(), is(responseAfterFlush.getSourceAsString()));
+ }
+
+ @Test
+ public void testThatGetFromTranslogShouldWorkWithInclude() throws Exception {
+ String index = "test";
+ String type = "type1";
+
+ String mapping = jsonBuilder()
+ .startObject()
+ .startObject(type)
+ .startObject("_source")
+ .array("includes", "included")
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ client().admin().indices().prepareCreate(index)
+ .addMapping(type, mapping)
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .execute().actionGet();
+
+ client().prepareIndex(index, type, "1")
+ .setSource(jsonBuilder().startObject().field("field", "1", "2").field("included", "should be seen").endObject())
+ .execute().actionGet();
+
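+ // same comparison as the exclude test: the translog get and the post-flush get must agree on the filtered source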
+ GetResponse responseBeforeFlush = client().prepareGet(index, type, "1").execute().actionGet();
+ client().admin().indices().prepareFlush(index).execute().actionGet();
+ GetResponse responseAfterFlush = client().prepareGet(index, type, "1").execute().actionGet();
+
+ assertThat(responseBeforeFlush.isExists(), is(true));
+ assertThat(responseAfterFlush.isExists(), is(true));
+ assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("field")));
+ assertThat(responseBeforeFlush.getSourceAsMap(), hasKey("included"));
+ assertThat(responseBeforeFlush.getSourceAsString(), is(responseAfterFlush.getSourceAsString()));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testThatGetFromTranslogShouldWorkWithIncludeExcludeAndFields() throws Exception {
+ String index = "test";
+ String type = "type1";
+
+ String mapping = jsonBuilder()
+ .startObject()
+ .startObject(type)
+ .startObject("_source")
+ .array("includes", "included")
+ .array("exlcudes", "excluded")
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ client().admin().indices().prepareCreate(index)
+ .addMapping(type, mapping)
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .execute().actionGet();
+
+ client().prepareIndex(index, type, "1")
+ .setSource(jsonBuilder().startObject()
+ .field("field", "1", "2")
+ .startObject("included").field("field", "should be seen").field("field2", "extra field to remove").endObject()
+ .startObject("excluded").field("field", "should not be seen").field("field2", "should not be seen").endObject()
+ .endObject())
+ .execute().actionGet();
+
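+ // even a translog-backed get must apply the mapping-level include/exclude filters to _source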
+ GetResponse responseBeforeFlush = client().prepareGet(index, type, "1").setFields("_source", "included.field", "excluded.field").execute().actionGet();
+ assertThat(responseBeforeFlush.isExists(), is(true));
+ assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("excluded")));
+ assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("field")));
+ assertThat(responseBeforeFlush.getSourceAsMap(), hasKey("included"));
+
+ // now test that extra source filtering works as expected
+ GetResponse responseBeforeFlushWithExtraFilters = client().prepareGet(index, type, "1").setFields("included.field", "excluded.field")
+ .setFetchSource(new String[]{"field", "*.field"}, new String[]{"*.field2"}).get();
+ assertThat(responseBeforeFlushWithExtraFilters.isExists(), is(true));
+ assertThat(responseBeforeFlushWithExtraFilters.getSourceAsMap(), not(hasKey("excluded")));
+ assertThat(responseBeforeFlushWithExtraFilters.getSourceAsMap(), not(hasKey("field")));
+ assertThat(responseBeforeFlushWithExtraFilters.getSourceAsMap(), hasKey("included"));
+ assertThat((Map<String, Object>) responseBeforeFlushWithExtraFilters.getSourceAsMap().get("included"), hasKey("field"));
+ assertThat((Map<String, Object>) responseBeforeFlushWithExtraFilters.getSourceAsMap().get("included"), not(hasKey("field2")));
+
+ client().admin().indices().prepareFlush(index).execute().actionGet();
+ GetResponse responseAfterFlush = client().prepareGet(index, type, "1").setFields("_source", "included.field", "excluded.field").execute().actionGet();
+ GetResponse responseAfterFlushWithExtraFilters = client().prepareGet(index, type, "1").setFields("included.field", "excluded.field")
+ .setFetchSource("*.field", "*.field2").get();
+
+ assertThat(responseAfterFlush.isExists(), is(true));
+ assertThat(responseBeforeFlush.getSourceAsString(), is(responseAfterFlush.getSourceAsString()));
+
+ assertThat(responseAfterFlushWithExtraFilters.isExists(), is(true));
+ assertThat(responseBeforeFlushWithExtraFilters.getSourceAsString(), is(responseAfterFlushWithExtraFilters.getSourceAsString()));
+ }
+
+ @Test
+ public void testGetWithVersion() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)).execute().actionGet();
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ GetResponse response = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+
+ logger.info("--> index doc 1");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").execute().actionGet();
+
+ // From translog:
+
+ // version 0 means ignore version, which is the default
+ response = client().prepareGet("test", "type1", "1").setVersion(0).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(1L));
+
+ response = client().prepareGet("test", "type1", "1").setVersion(1).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(1L));
+
+ try {
+ client().prepareGet("test", "type1", "1").setVersion(2).execute().actionGet();
+ fail("get with version 2 should have thrown, current version is 1");
+ } catch (VersionConflictEngineException e) {
+ // expected
+ }
+
+ // From Lucene index:
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ // version 0 means ignore version, which is the default
+ response = client().prepareGet("test", "type1", "1").setVersion(0).setRealtime(false).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(1L));
+
+ response = client().prepareGet("test", "type1", "1").setVersion(1).setRealtime(false).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(1L));
+
+ try {
+ client().prepareGet("test", "type1", "1").setVersion(2).setRealtime(false).execute().actionGet();
+ fail("non-realtime get with version 2 should have thrown, current version is 1");
+ } catch (VersionConflictEngineException e) {
+ // expected
+ }
+
+ logger.info("--> index doc 1 again, so increasing the version");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").execute().actionGet();
+
+ // From translog:
+
+ // version 0 means ignore version, which is the default
+ response = client().prepareGet("test", "type1", "1").setVersion(0).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(2L));
+
+ try {
+ client().prepareGet("test", "type1", "1").setVersion(1).execute().actionGet();
+ fail("get with the stale version 1 should have thrown, current version is 2");
+ } catch (VersionConflictEngineException e) {
+ // expected
+ }
+
+ response = client().prepareGet("test", "type1", "1").setVersion(2).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(2L));
+
+ // From Lucene index:
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ // version 0 means ignore version, which is the default
+ response = client().prepareGet("test", "type1", "1").setVersion(0).setRealtime(false).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(2L));
+
+ try {
+ client().prepareGet("test", "type1", "1").setVersion(1).setRealtime(false).execute().actionGet();
+ fail("non-realtime get with the stale version 1 should have thrown, current version is 2");
+ } catch (VersionConflictEngineException e) {
+ // expected
+ }
+
+ response = client().prepareGet("test", "type1", "1").setVersion(2).setRealtime(false).execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(2L));
+ }
+
+ @Test
+ public void testMultiGetWithVersion() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // it's fine if the index does not exist yet
+ }
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)).execute().actionGet();
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ MultiGetResponse response = client().prepareMultiGet().add("test", "type1", "1").execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(1));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false));
+
+ for (int i = 0; i < 3; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
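+ // with refresh disabled, the docs now only live in the translog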
+
+ // Version from translog
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "1").version(0))
+ .add(new MultiGetRequest.Item("test", "type1", "1").version(1))
+ .add(new MultiGetRequest.Item("test", "type1", "1").version(2))
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0]: version 0 means ignore the version, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("1"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[1].getId(), equalTo("1"));
+ assertThat(response.getResponses()[1].getFailure(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[2].getFailure(), notNullValue());
+ assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1"));
+ assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+
+ // Version from Lucene index
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "1").version(0))
+ .add(new MultiGetRequest.Item("test", "type1", "1").version(1))
+ .add(new MultiGetRequest.Item("test", "type1", "1").version(2))
+ .setRealtime(false)
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0]: version 0 means ignore the version, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("1"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[1].getId(), equalTo("1"));
+ assertThat(response.getResponses()[1].getFailure(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[2].getFailure(), notNullValue());
+ assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1"));
+ assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+
+ for (int i = 0; i < 3; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+
+ // Version from translog
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "2").version(0))
+ .add(new MultiGetRequest.Item("test", "type1", "2").version(1))
+ .add(new MultiGetRequest.Item("test", "type1", "2").version(2))
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0]: version 0 means ignore the version, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("2"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2"));
+ assertThat(response.getResponses()[1].getFailure(), notNullValue());
+ assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2"));
+ assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+ assertThat(response.getResponses()[2].getId(), equalTo("2"));
+ assertThat(response.getResponses()[2].getFailure(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[2].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2"));
+
+ // Version from Lucene index
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "2").version(0))
+ .add(new MultiGetRequest.Item("test", "type1", "2").version(1))
+ .add(new MultiGetRequest.Item("test", "type1", "2").version(2))
+ .setRealtime(false)
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0]: version 0 means ignore the version, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("2"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2"));
+ assertThat(response.getResponses()[1].getFailure(), notNullValue());
+ assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2"));
+ assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+ assertThat(response.getResponses()[2].getId(), equalTo("2"));
+ assertThat(response.getResponses()[2].getFailure(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[2].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2"));
+ }
+
+ @Test
+ public void testGetFields_metaData() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .get();
+
+ client().prepareIndex("my-index", "my-type1", "1")
+ .setRouting("1")
+ .setSource(jsonBuilder().startObject().field("field1", "value").endObject())
+ .get();
+
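+ // fetch a regular field together with the _routing metadata field in one realtime get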
+ GetResponse getResponse = client().prepareGet("my-index", "my-type1", "1")
+ .setRouting("1")
+ .setFields("field1", "_routing")
+ .get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false));
+ assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value"));
+ assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true));
+ assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1"));
+
+ client().admin().indices().prepareFlush("my-index").get();
+
+ client().prepareGet("my-index", "my-type1", "1")
+ .setFields("field1", "_routing")
+ .setRouting("1")
+ .get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false));
+ assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value"));
+ assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true));
+ assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1"));
+ }
+
+ @Test
+ public void testGetFields_nonLeafField() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type1", jsonBuilder().startObject().startObject("my-type1").startObject("properties")
+ .startObject("field1").startObject("properties")
+ .startObject("field2").field("type", "string").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .get();
+
+ client().prepareIndex("my-index", "my-type1", "1")
+ .setSource(jsonBuilder().startObject().startObject("field1").field("field2", "value1").endObject().endObject())
+ .get();
+
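+ // "field1" is an object (non-leaf) field, so requesting it via fields must be rejected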
+ try {
+ client().prepareGet("my-index", "my-type1", "1").setFields("field1").get();
+ fail("getting a non-leaf field should have thrown");
+ } catch (ElasticsearchIllegalArgumentException e) {
+ // expected
+ }
+
+ client().admin().indices().prepareFlush("my-index").get();
+
+ try {
+ client().prepareGet("my-index", "my-type1", "1").setFields("field1").get();
+ fail("getting a non-leaf field should have thrown after the flush as well");
+ } catch (ElasticsearchIllegalArgumentException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testGetFields_complexField() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .addMapping("my-type2", jsonBuilder().startObject().startObject("my-type2").startObject("properties")
+ .startObject("field1").field("type", "object")
+ .startObject("field2").field("type", "object")
+ .startObject("field3").field("type", "object")
+ .startObject("field4").field("type", "string").field("store", "yes")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ BytesReference source = jsonBuilder().startObject()
+ .startArray("field1")
+ .startObject()
+ .startObject("field2")
+ .startArray("field3")
+ .startObject()
+ .field("field4", "value1")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .startObject()
+ .startObject("field2")
+ .startArray("field3")
+ .startObject()
+ .field("field4", "value2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endArray()
+ .endObject().bytes();
+
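+ // same source under both types: my-type2 maps field4 as stored, my-type1 is mapped dynamically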
+ client().prepareIndex("my-index", "my-type1", "1").setSource(source).get();
+ client().prepareIndex("my-index", "my-type2", "1").setSource(source).get();
+
+ String field = "field1.field2.field3.field4";
+ GetResponse getResponse = client().prepareGet("my-index", "my-type1", "1").setFields(field).get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
+ assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
+ assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
+
+ getResponse = client().prepareGet("my-index", "my-type2", "1").setFields(field).get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
+ assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
+ assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
+
+ client().admin().indices().prepareFlush("my-index").get();
+
+ getResponse = client().prepareGet("my-index", "my-type1", "1").setFields(field).get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
+ assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
+ assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
+
+ getResponse = client().prepareGet("my-index", "my-type2", "1").setFields(field).get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
+ assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
+ assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/IndexRequestBuilderTests.java b/src/test/java/org/elasticsearch/index/IndexRequestBuilderTests.java
new file mode 100644
index 0000000..1b6e4aa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/IndexRequestBuilderTests.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+public class IndexRequestBuilderTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSetSource() throws InterruptedException, ExecutionException {
+ createIndex("test");
+ ensureYellow();
+ Map<String, Object> map = new HashMap<String, Object>();
+ map.put("test_field", "foobar");
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[] {
+ client().prepareIndex("test", "test").setSource((Object)"test_field", (Object)"foobar"),
+ client().prepareIndex("test", "test").setSource("{\"test_field\" : \"foobar\"}"),
+ client().prepareIndex("test", "test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}")),
+ client().prepareIndex("test", "test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}"), randomBoolean()),
+ client().prepareIndex("test", "test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}").toBytes()),
+ client().prepareIndex("test", "test").setSource(map)
+ };
+ indexRandom(true, builders);
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.termQuery("test_field", "foobar")).get();
+ ElasticsearchAssertions.assertHitCount(searchResponse, builders.length);
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testOddNumberOfSourceObjects() {
+ client().prepareIndex("test", "test").setSource((Object)"test_field", (Object)"foobar", new Object());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/VersionTypeTests.java b/src/test/java/org/elasticsearch/index/VersionTypeTests.java
new file mode 100644
index 0000000..01aabd2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/VersionTypeTests.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class VersionTypeTests extends ElasticsearchTestCase {
+ @Test
+ public void testInternalVersionConflict() throws Exception {
+
+ assertFalse(VersionType.INTERNAL.isVersionConflict(10, Versions.MATCH_ANY));
+ // if we don't have a version in the index we accept everything
+ assertFalse(VersionType.INTERNAL.isVersionConflict(Versions.NOT_SET, 10));
+ assertFalse(VersionType.INTERNAL.isVersionConflict(Versions.NOT_SET, Versions.MATCH_ANY));
+
+ // if we didn't find a version (but the index does support it), it's a conflict unless MATCH_ANY was requested
+ assertTrue(VersionType.INTERNAL.isVersionConflict(Versions.NOT_FOUND, Versions.NOT_FOUND));
+ assertTrue(VersionType.INTERNAL.isVersionConflict(Versions.NOT_FOUND, 10));
+ assertFalse(VersionType.INTERNAL.isVersionConflict(Versions.NOT_FOUND, Versions.MATCH_ANY));
+
+ // and the usual case
+ assertFalse(VersionType.INTERNAL.isVersionConflict(10, 10));
+ assertTrue(VersionType.INTERNAL.isVersionConflict(9, 10));
+ assertTrue(VersionType.INTERNAL.isVersionConflict(10, 9));
+
+// Old indexing code, dictating behavior
+// if (expectedVersion != Versions.MATCH_ANY && currentVersion != Versions.NOT_SET) {
+// // an explicit version is provided, see if there is a conflict
+// // if we did not find anything, and a version is provided, so we do expect to find a doc under that version
+// // this is important, since we don't allow to preset a version in order to handle deletes
+// if (currentVersion == Versions.NOT_FOUND) {
+// throw new VersionConflictEngineException(shardId, index.type(), index.id(), Versions.NOT_FOUND, expectedVersion);
+// } else if (expectedVersion != currentVersion) {
+// throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion);
+// }
+// }
+// updatedVersion = (currentVersion == Versions.NOT_SET || currentVersion == Versions.NOT_FOUND) ? 1 : currentVersion + 1;
+ }
+
+ @Test
+ public void testExternalVersionConflict() throws Exception {
+
+ assertFalse(VersionType.EXTERNAL.isVersionConflict(Versions.NOT_FOUND, 10));
+ assertFalse(VersionType.EXTERNAL.isVersionConflict(Versions.NOT_SET, 10));
+ // MATCH_ANY must conflict for external versioning: the version must be set, since it is used as the new value
+ assertTrue(VersionType.EXTERNAL.isVersionConflict(10, Versions.MATCH_ANY));
+
+ // if we didn't find a version (but the index does support it), we always accept
+ assertFalse(VersionType.EXTERNAL.isVersionConflict(Versions.NOT_FOUND, Versions.NOT_FOUND));
+ assertFalse(VersionType.EXTERNAL.isVersionConflict(Versions.NOT_FOUND, 10));
+ assertFalse(VersionType.EXTERNAL.isVersionConflict(Versions.NOT_FOUND, Versions.MATCH_ANY));
+
+ // and the standard behavior
+ assertTrue(VersionType.EXTERNAL.isVersionConflict(10, 10));
+ assertFalse(VersionType.EXTERNAL.isVersionConflict(9, 10));
+ assertTrue(VersionType.EXTERNAL.isVersionConflict(10, 9));
+
+// Old indexing code, dictating behavior
+// // an external version is provided, just check, if a local version exists, that its higher than it
+// // the actual version checking is one in an external system, and we just want to not index older versions
+// if (currentVersion >= 0) { // we can check!, its there
+// if (currentVersion >= index.version()) {
+// throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, index.version());
+// }
+// }
+// updatedVersion = index.version();
+ }
+
+ @Test
+ public void testUpdateVersion() {
+
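+ // internal versioning starts at 1 and increments; external versioning adopts the supplied version as-is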
+ assertThat(VersionType.INTERNAL.updateVersion(Versions.NOT_SET, 10), equalTo(1L));
+ assertThat(VersionType.INTERNAL.updateVersion(Versions.NOT_FOUND, 10), equalTo(1L));
+ assertThat(VersionType.INTERNAL.updateVersion(1, 1), equalTo(2L));
+ assertThat(VersionType.INTERNAL.updateVersion(2, Versions.MATCH_ANY), equalTo(3L));
+
+ assertThat(VersionType.EXTERNAL.updateVersion(Versions.NOT_SET, 10), equalTo(10L));
+ assertThat(VersionType.EXTERNAL.updateVersion(Versions.NOT_FOUND, 10), equalTo(10L));
+ assertThat(VersionType.EXTERNAL.updateVersion(1, 10), equalTo(10L));
+
+// Old indexing code
+// if (index.versionType() == VersionType.INTERNAL) { // internal version type
+// updatedVersion = (currentVersion == Versions.NOT_SET || currentVersion == Versions.NOT_FOUND) ? 1 : currentVersion + 1;
+// } else { // external version type
+// updatedVersion = expectedVersion;
+// }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java b/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java
new file mode 100644
index 0000000..65ecafb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.aliases;
+
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.query.FilterBuilder;
+import org.elasticsearch.index.query.IndexQueryParserModule;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.InvalidAliasNameException;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class IndexAliasesServiceTests extends ElasticsearchTestCase {
+ public static IndexAliasesService newIndexAliasesService() {
+ return new IndexAliasesService(new Index("test"), ImmutableSettings.Builder.EMPTY_SETTINGS, newIndexQueryParserService());
+ }
+
+ public static IndexQueryParserService newIndexQueryParserService() {
+ Injector injector = new ModulesBuilder().add(
+ new IndicesQueriesModule(),
+ new CacheRecyclerModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new CodecModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new IndexSettingsModule(new Index("test"), ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new IndexNameModule(new Index("test")),
+ new IndexQueryParserModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new AnalysisModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new SimilarityModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new ScriptModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new SettingsModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new IndexEngineModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new IndexCacheModule(ImmutableSettings.Builder.EMPTY_SETTINGS),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+ return injector.getInstance(IndexQueryParserService.class);
+ }
+
+ public static CompressedString filter(FilterBuilder filterBuilder) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.close();
+ return new CompressedString(builder.string());
+ }
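+ // alias filters are handed to the service as compressed JSON, which the helper above produces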
+
+ @Test
+ public void testFilteringAliases() throws Exception {
+ IndexAliasesService indexAliasesService = newIndexAliasesService();
+ indexAliasesService.add("cats", filter(termFilter("animal", "cat")));
+ indexAliasesService.add("dogs", filter(termFilter("animal", "dog")));
+ indexAliasesService.add("all", null);
+
+ assertThat(indexAliasesService.hasAlias("cats"), equalTo(true));
+ assertThat(indexAliasesService.hasAlias("dogs"), equalTo(true));
+ assertThat(indexAliasesService.hasAlias("turtles"), equalTo(false));
+
+ assertThat(indexAliasesService.aliasFilter("cats").toString(), equalTo("cache(animal:cat)"));
+ assertThat(indexAliasesService.aliasFilter("cats", "dogs").toString(), equalTo("BooleanFilter(cache(animal:cat) cache(animal:dog))"));
+
+ // Non-filtering alias should turn off all filters because filters are ORed
+ assertThat(indexAliasesService.aliasFilter("all"), nullValue());
+ assertThat(indexAliasesService.aliasFilter("cats", "all"), nullValue());
+ assertThat(indexAliasesService.aliasFilter("all", "cats"), nullValue());
+
+ indexAliasesService.add("cats", filter(termFilter("animal", "feline")));
+ indexAliasesService.add("dogs", filter(termFilter("animal", "canine")));
+ assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("BooleanFilter(cache(animal:canine) cache(animal:feline))"));
+ }
+
+ @Test
+ public void testAliasFilters() throws Exception {
+ IndexAliasesService indexAliasesService = newIndexAliasesService();
+ indexAliasesService.add("cats", filter(termFilter("animal", "cat")));
+ indexAliasesService.add("dogs", filter(termFilter("animal", "dog")));
+
+ assertThat(indexAliasesService.aliasFilter(), nullValue());
+ assertThat(indexAliasesService.aliasFilter("dogs").toString(), equalTo("cache(animal:dog)"));
+ assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("BooleanFilter(cache(animal:dog) cache(animal:cat))"));
+
+ indexAliasesService.add("cats", filter(termFilter("animal", "feline")));
+ indexAliasesService.add("dogs", filter(termFilter("animal", "canine")));
+
+ assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("BooleanFilter(cache(animal:canine) cache(animal:feline))"));
+ }
+
+ @Test(expected = InvalidAliasNameException.class)
+ public void testRemovedAliasFilter() throws Exception {
+ IndexAliasesService indexAliasesService = newIndexAliasesService();
+ indexAliasesService.add("cats", filter(termFilter("animal", "cat")));
+ indexAliasesService.remove("cats");
+ indexAliasesService.aliasFilter("cats");
+ }
+
+ @Test
+ public void testUnknownAliasFilter() throws Exception {
+ IndexAliasesService indexAliasesService = newIndexAliasesService();
+ indexAliasesService.add("cats", filter(termFilter("animal", "cat")));
+ indexAliasesService.add("dogs", filter(termFilter("animal", "dog")));
+
+ try {
+ indexAliasesService.aliasFilter("unknown");
+ fail("expected an InvalidAliasNameException for an unknown alias");
+ } catch (InvalidAliasNameException e) {
+ // all is well
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java b/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java
new file mode 100644
index 0000000..ef94926
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.google.common.base.Charsets;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.fa.PersianNormalizationFilter;
+import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.filter1.MyFilterTokenFilterFactory;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.*;
+import java.lang.reflect.Field;
+import java.util.Set;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class AnalysisModuleTests extends ElasticsearchTestCase {
+
+ private Injector injector;
+
+ public AnalysisService getAnalysisService(Settings settings) {
+ Index index = new Index("test");
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
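+ // index-level modules go into a child injector so they can see the node-level (parent) bindings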
+ injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ return injector.getInstance(AnalysisService.class);
+ }
+
+ @Test
+ public void testSimpleConfigurationJson() {
+ Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.json").build();
+ testSimpleConfiguration(settings);
+ }
+
+ @Test
+ public void testSimpleConfigurationYaml() {
+ Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.yml").build();
+ testSimpleConfiguration(settings);
+ }
+
+ @Test
+ public void testDefaultFactoryTokenFilters() {
+ assertTokenFilter("keyword_repeat", KeywordRepeatFilter.class);
+ assertTokenFilter("persian_normalization", PersianNormalizationFilter.class);
+ assertTokenFilter("arabic_normalization", ArabicNormalizationFilter.class);
+ }
+
+ @Test
+ public void testVersionedAnalyzers() throws Exception {
+ Settings settings2 = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.yml")
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build();
+ AnalysisService analysisService2 = getAnalysisService(settings2);
+
+ // the IndicesAnalysisService always uses the current version
+ IndicesAnalysisService indicesAnalysisService2 = injector.getInstance(IndicesAnalysisService.class);
+ assertThat(indicesAnalysisService2.analyzer("default"), is(instanceOf(NamedAnalyzer.class)));
+ NamedAnalyzer defaultNamedAnalyzer = (NamedAnalyzer) indicesAnalysisService2.analyzer("default");
+ assertThat(defaultNamedAnalyzer.analyzer(), is(instanceOf(StandardAnalyzer.class)));
+ assertLuceneAnalyzerVersion(Version.CURRENT.luceneVersion, defaultNamedAnalyzer.analyzer());
+
+ // analysis service has the expected version
+ assertThat(analysisService2.analyzer("standard").analyzer(), is(instanceOf(StandardAnalyzer.class)));
+ assertLuceneAnalyzerVersion(Version.V_0_90_0.luceneVersion, analysisService2.analyzer("standard").analyzer());
+ assertLuceneAnalyzerVersion(Version.V_0_90_0.luceneVersion, analysisService2.analyzer("thai").analyzer());
+ }
+
+ // ugly reflection-based hack to extract the Lucene version from an analyzer
+ private void assertLuceneAnalyzerVersion(org.apache.lucene.util.Version luceneVersion, Analyzer analyzer) throws Exception {
+ Field field = analyzer.getClass().getSuperclass().getDeclaredField("matchVersion");
+ boolean currentAccessible = field.isAccessible();
+ field.setAccessible(true);
+ Object obj = field.get(analyzer);
+ field.setAccessible(currentAccessible);
+
+ assertThat(obj, instanceOf(org.apache.lucene.util.Version.class));
+ org.apache.lucene.util.Version analyzerVersion = (org.apache.lucene.util.Version) obj;
+ assertThat(analyzerVersion, is(luceneVersion));
+ }
+
+ private void assertTokenFilter(String name, Class clazz) {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(ImmutableSettings.settingsBuilder().build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter(name);
+ Tokenizer tokenizer = new WhitespaceTokenizer(Version.CURRENT.luceneVersion, new StringReader("foo bar"));
+ TokenStream stream = tokenFilter.create(tokenizer);
+ assertThat(stream, instanceOf(clazz));
+ }
+
+ private void testSimpleConfiguration(Settings settings) {
+ AnalysisService analysisService = getAnalysisService(settings);
+ Analyzer analyzer = analysisService.analyzer("custom1").analyzer();
+
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom1 = (CustomAnalyzer) analyzer;
+ assertThat(custom1.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class));
+ assertThat(custom1.tokenFilters().length, equalTo(2));
+
+ StopTokenFilterFactory stop1 = (StopTokenFilterFactory) custom1.tokenFilters()[0];
+ assertThat(stop1.stopWords().size(), equalTo(1));
+ //assertThat((Iterable<char[]>) stop1.stopWords(), hasItem("test-stop".toCharArray()));
+
+ analyzer = analysisService.analyzer("custom2").analyzer();
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom2 = (CustomAnalyzer) analyzer;
+
+// HtmlStripCharFilterFactory html = (HtmlStripCharFilterFactory) custom2.charFilters()[0];
+// assertThat(html.readAheadLimit(), equalTo(HTMLStripCharFilter.DEFAULT_READ_AHEAD));
+//
+// html = (HtmlStripCharFilterFactory) custom2.charFilters()[1];
+// assertThat(html.readAheadLimit(), equalTo(1024));
+
+ // verify characters mapping
+ analyzer = analysisService.analyzer("custom5").analyzer();
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom5 = (CustomAnalyzer) analyzer;
+ assertThat(custom5.charFilters()[0], instanceOf(MappingCharFilterFactory.class));
+
+ // verify aliases
+ analyzer = analysisService.analyzer("alias1").analyzer();
+ assertThat(analyzer, instanceOf(StandardAnalyzer.class));
+
+ // check custom pattern replace filter
+ analyzer = analysisService.analyzer("custom3").analyzer();
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom3 = (CustomAnalyzer) analyzer;
+ PatternReplaceCharFilterFactory patternReplaceCharFilterFactory = (PatternReplaceCharFilterFactory) custom3.charFilters()[0];
+ assertThat(patternReplaceCharFilterFactory.getPattern().pattern(), equalTo("sample(.*)"));
+ assertThat(patternReplaceCharFilterFactory.getReplacement(), equalTo("replacedSample $1"));
+
+ // check custom class name (my)
+ analyzer = analysisService.analyzer("custom4").analyzer();
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom4 = (CustomAnalyzer) analyzer;
+ assertThat(custom4.tokenFilters()[0], instanceOf(MyFilterTokenFilterFactory.class));
+
+// // verify Czech stemmer
+// analyzer = analysisService.analyzer("czechAnalyzerWithStemmer").analyzer();
+// assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+// CustomAnalyzer czechstemmeranalyzer = (CustomAnalyzer) analyzer;
+// assertThat(czechstemmeranalyzer.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class));
+// assertThat(czechstemmeranalyzer.tokenFilters().length, equalTo(4));
+// assertThat(czechstemmeranalyzer.tokenFilters()[3], instanceOf(CzechStemTokenFilterFactory.class));
+//
+// // check dictionary decompounder
+// analyzer = analysisService.analyzer("decompoundingAnalyzer").analyzer();
+// assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+// CustomAnalyzer dictionaryDecompounderAnalyze = (CustomAnalyzer) analyzer;
+// assertThat(dictionaryDecompounderAnalyze.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class));
+// assertThat(dictionaryDecompounderAnalyze.tokenFilters().length, equalTo(1));
+// assertThat(dictionaryDecompounderAnalyze.tokenFilters()[0], instanceOf(DictionaryCompoundWordTokenFilterFactory.class));
+
+ Set<?> wordList = Analysis.getWordSet(null, settings, "index.analysis.filter.dict_dec.word_list", Lucene.VERSION);
+ MatcherAssert.assertThat(wordList.size(), equalTo(6));
+// MatcherAssert.assertThat(wordList, hasItems("donau", "dampf", "schiff", "spargel", "creme", "suppe"));
+ }
+
+ @Test
+ public void testWordListPath() throws Exception {
+ Environment env = new Environment(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ String[] words = new String[]{"donau", "dampf", "schiff", "spargel", "creme", "suppe"};
+
+ File wordListFile = generateWordList(words);
+ Settings settings = settingsBuilder().loadFromSource("index: \n word_list_path: " + wordListFile.getAbsolutePath()).build();
+
+ Set<?> wordList = Analysis.getWordSet(env, settings, "index.word_list", Lucene.VERSION);
+ MatcherAssert.assertThat(wordList.size(), equalTo(6));
+// MatcherAssert.assertThat(wordList, hasItems(words));
+ }
+
+ private File generateWordList(String[] words) throws Exception {
+ File wordListFile = File.createTempFile("wordlist", ".txt");
+ wordListFile.deleteOnExit();
+
+ BufferedWriter writer = null;
+ try {
+ writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(wordListFile), Charsets.UTF_8));
+ for (String word : words) {
+ writer.write(word);
+ writer.write('\n');
+ }
+ } finally {
+ if (writer != null) {
+ writer.close();
+ }
+ }
+ return wordListFile;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java b/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java
new file mode 100644
index 0000000..ddf7b9d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+
+public class AnalysisTestsHelper {
+
+ public static AnalysisService createAnalysisServiceFromClassPath(String resource) {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .loadFromClasspath(resource).build();
+
+ return createAnalysisServiceFromSettings(settings);
+ }
+
+ public static AnalysisService createAnalysisServiceFromSettings(
+ Settings settings) {
+ Index index = new Index("test");
+
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings),
+ new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+
+ AnalysisModule analysisModule = new AnalysisModule(settings,
+ parentInjector.getInstance(IndicesAnalysisService.class));
+
+ Injector injector = new ModulesBuilder().add(new IndexSettingsModule(index, settings),
+ new IndexNameModule(index), analysisModule).createChildInjector(parentInjector);
+
+ return injector.getInstance(AnalysisService.class);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java b/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java
new file mode 100644
index 0000000..3a23086
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Ignore;
+
+import java.io.IOException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
+
+/**
+ */
+public class AnalyzerBackwardsCompatTests extends ElasticsearchTokenStreamTestCase {
+
+ @Ignore
+ private void testNoStopwordsAfter(org.elasticsearch.Version noStopwordVersion, String type) throws IOException {
+ final int iters = atLeast(10);
+ org.elasticsearch.Version version = org.elasticsearch.Version.CURRENT;
+ for (int i = 0; i < iters; i++) {
+ ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop");
+ if (version.onOrAfter(noStopwordVersion)) {
+ if (random().nextBoolean()) {
+ builder.put(SETTING_VERSION_CREATED, version);
+ }
+ } else {
+ builder.put(SETTING_VERSION_CREATED, version);
+ }
+ builder.put("index.analysis.analyzer.foo.type", type);
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(builder.build());
+ NamedAnalyzer analyzer = analysisService.analyzer("foo");
+ if (version.onOrAfter(noStopwordVersion)) {
+ assertAnalyzesTo(analyzer, "this is bogus", new String[]{"this", "is", "bogus"});
+ } else {
+ assertAnalyzesTo(analyzer, "this is bogus", new String[]{"bogus"});
+ }
+ version = randomVersion();
+ }
+ }
+
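+ // each analyzer type stops applying stopwords by default as of the version passed to testNoStopwordsAfter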
+ public void testPatternAnalyzer() throws IOException {
+ testNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_RC1, "pattern");
+ }
+
+ public void testStandardHTMLStripAnalyzer() throws IOException {
+ testNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_RC1, "standard_html_strip");
+ }
+
+ public void testStandardAnalyzer() throws IOException {
+ testNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_Beta1, "standard");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java
new file mode 100644
index 0000000..e657731
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+public class CJKFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ private static final String RESOURCE = "org/elasticsearch/index/analysis/cjk_analysis.json";
+
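+ // The bigram filter turns the Japanese sample sentence ("Many students failed
+ // the exam.") into overlapping two-character tokens.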
+ @Test
+ public void testDefault() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_bigram");
+ String source = "多くの学生が試験に落ちた。";
+ String[] expected = new String[]{"多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" };
+ Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testNoFlags() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_no_flags");
+ String source = "多くの学生が試験に落ちた。";
+ String[] expected = new String[]{"多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" };
+ Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
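+ // With han_only, only runs of Han (kanji) characters are bigrammed; kana pass through as unigrams.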
+ @Test
+ public void testHanOnly() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_han_only");
+ String source = "多くの学生が試験に落ちた。";
+ String[] expected = new String[]{"多", "く", "の", "学生", "が", "試験", "に", "落", "ち", "た" };
+ Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
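+ // With han_unigram_only, Han runs produce both unigrams and bigrams; kana stay unigrams.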
+ @Test
+ public void testHanUnigramOnly() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_han_unigram_only");
+ String source = "多くの学生が試験に落ちた。";
+ String[] expected = new String[]{"多", "く", "の", "学", "学生", "生", "が", "試", "試験", "験", "に", "落", "ち", "た" };
+ Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java b/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java
new file mode 100644
index 0000000..a168586
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+/**
+ */
+public class CharFilterTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test
+ public void testMappingCharFilter() throws Exception {
+ Index index = new Index("test");
+ Settings settings = settingsBuilder()
+ .put("index.analysis.char_filter.my_mapping.type", "mapping")
+ .putArray("index.analysis.char_filter.my_mapping.mappings", "ph=>f", "qu=>q")
+ .put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "my_mapping")
+ .build();
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+ NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
+
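+ // my_mapping rewrites "qu" to "q" and "ph" to "f", so "quit" becomes "qit" and "phish" becomes "fish"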
+ assertTokenStreamContents(analyzer1.tokenStream("test", "jeff quit phish"), new String[]{"jeff", "qit", "fish"});
+
+ // Repeat one more time to make sure that char filter is reinitialized correctly
+ assertTokenStreamContents(analyzer1.tokenStream("test", "jeff quit phish"), new String[]{"jeff", "qit", "fish"});
+ }
+
+ @Test
+ public void testHtmlStripCharFilter() throws Exception {
+ Index index = new Index("test");
+ Settings settings = settingsBuilder()
+ .put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "html_strip")
+ .build();
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+ NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
+
+ assertTokenStreamContents(analyzer1.tokenStream("test", "<b>hello</b>!"), new String[]{"hello"});
+
+ // Repeat one more time to make sure that char filter is reinitialized correctly
+ assertTokenStreamContents(analyzer1.tokenStream("test", "<b>hello</b>!"), new String[]{"hello"});
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java b/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java
new file mode 100644
index 0000000..61fa566
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.lucene.all.AllEntries;
+import org.elasticsearch.common.lucene.all.AllTokenStream;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class CompoundAnalysisTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDefaultsCompoundAnalysis() throws Exception {
+ Index index = new Index("test");
+ Settings settings = getJsonSettings();
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+ TokenFilterFactory filterFactory = analysisService.tokenFilter("dict_dec");
+ MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class));
+ }
+
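+ // The decompounder keeps each original compound and adds its dictionary parts, hence eight terms for two words.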
+ @Test
+ public void testDictionaryDecompounder() throws Exception {
+ Settings[] settingsArr = new Settings[]{getJsonSettings(), getYamlSettings()};
+ for (Settings settings : settingsArr) {
+ List<String> terms = analyze(settings, "decompoundingAnalyzer", "donaudampfschiff spargelcremesuppe");
+ MatcherAssert.assertThat(terms.size(), equalTo(8));
+ MatcherAssert.assertThat(terms, hasItems("donau", "dampf", "schiff", "donaudampfschiff", "spargel", "creme", "suppe", "spargelcremesuppe"));
+ }
+ }
+
+ private List<String> analyze(Settings settings, String analyzerName, String text) throws IOException {
+ Index index = new Index("test");
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+ Analyzer analyzer = analysisService.analyzer(analyzerName).analyzer();
+
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field1", text, 1.0f);
+ allEntries.reset();
+
+ TokenStream stream = AllTokenStream.allTokenStream("_all", allEntries, analyzer);
+ stream.reset();
+ CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
+
+ List<String> terms = new ArrayList<String>();
+ while (stream.incrementToken()) {
+ String tokText = termAtt.toString();
+ terms.add(tokText);
+ }
+ return terms;
+ }
+
+ private Settings getJsonSettings() {
+ return settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.json").build();
+ }
+
+ private Settings getYamlSettings() {
+ return settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.yml").build();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java
new file mode 100644
index 0000000..b49d719
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.inject.ProvisionException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+
+public class HunspellTokenFilterFactoryTests extends ElasticsearchTestCase {
+
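+ // All tests load the en_US hunspell dictionary from the test config dir; per the
+ // assertions below, dedup defaults to true and recursion_level defaults to 2.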
+ @Test
+ public void testDedup() throws IOException {
+ Settings settings = settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("index.analysis.filter.en_US.type", "hunspell")
+ .put("index.analysis.filter.en_US.locale", "en_US")
+ .build();
+
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("en_US");
+ assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
+ HunspellTokenFilterFactory hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
+ assertThat(hunspellTokenFilter.dedup(), is(true));
+
+ settings = settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("index.analysis.filter.en_US.type", "hunspell")
+ .put("index.analysis.filter.en_US.dedup", false)
+ .put("index.analysis.filter.en_US.locale", "en_US")
+ .build();
+
+ analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ tokenFilter = analysisService.tokenFilter("en_US");
+ assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
+ hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
+ assertThat(hunspellTokenFilter.dedup(), is(false));
+ }
+
+ @Test
+ public void testDefaultRecursionLevel() throws IOException {
+ Settings settings = settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("index.analysis.filter.en_US.type", "hunspell")
+ .put("index.analysis.filter.en_US.locale", "en_US")
+ .build();
+
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("en_US");
+ assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
+ HunspellTokenFilterFactory hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
+ assertThat(hunspellTokenFilter.recursionLevel(), is(2));
+ }
+
+ @Test
+ public void testCustomRecursionLevel() throws IOException {
+ Settings settings = settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("index.analysis.filter.en_US.type", "hunspell")
+ .put("index.analysis.filter.en_US.recursion_level", 0)
+ .put("index.analysis.filter.en_US.locale", "en_US")
+ .build();
+
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("en_US");
+ assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
+ HunspellTokenFilterFactory hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
+ assertThat(hunspellTokenFilter.recursionLevel(), is(0));
+ }
+
+ @Test(expected = ProvisionException.class)
+ public void negativeRecursionLevelShouldFail() throws IOException {
+ Settings settings = settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("index.analysis.filter.en_US.type", "hunspell")
+ .put("index.analysis.filter.en_US.recursion_level", -1)
+ .put("index.analysis.filter.en_US.locale", "en_US")
+ .build();
+ AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java
new file mode 100644
index 0000000..cc2df1b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.FailedToResolveConfigException;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+public class KeepFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ private static final String RESOURCE = "org/elasticsearch/index/analysis/keep_analysis.json";
+
+
+ @Test
+ public void testLoadWithoutSettings() {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("keep");
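+ // "keep" is not defined in keep_analysis.json, so the lookup is expected to return null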
+ Assert.assertNull(tokenFilter);
+ }
+
+ @Test
+ public void testLoadOverConfiguredSettings() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.analysis.filter.broken_keep_filter.type", "keep")
+ .put("index.analysis.filter.broken_keep_filter.keep_words_path", "does/not/exists.txt")
+ .put("index.analysis.filter.broken_keep_filter.keep_words", "[\"Hello\", \"worlD\"]")
+ .build();
+ try {
+ AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Assert.fail("path and array are configured");
+ } catch (Exception e) {
+ assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class));
+ }
+ }
+
+ @Test
+ public void testKeepWordsPathSettings() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.analysis.filter.non_broken_keep_filter.type", "keep")
+ .put("index.analysis.filter.non_broken_keep_filter.keep_words_path", "does/not/exists.txt")
+ .build();
+ try {
+ // make sure the non-existent keep_words_path is detected
+ AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ fail("expected an exception due to non existent keep_words_path");
+ } catch (Throwable e) {
+ assertThat(e.getCause(), instanceOf(FailedToResolveConfigException.class));
+ }
+
+ settings = ImmutableSettings.settingsBuilder().put(settings)
+ .put("index.analysis.filter.non_broken_keep_filter.keep_words", new String[]{"test"})
+ .build();
+ try {
+ // make sure that configuring keep_words together with keep_words_path is rejected
+ AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ fail("expected an exception indicating that [keep_words_path] and [keep_words] cannot be used together");
+ } catch (Throwable e) {
+ assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class));
+ }
+
+ }
+
+ @Test
+ public void testCaseInsensitiveMapping() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_keep_filter");
+ assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class));
+ String source = "hello small world";
+ String[] expected = new String[]{"hello", "world"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected, new int[]{1, 2});
+ }
+
+ @Test
+ public void testCaseSensitiveMapping() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_case_sensitive_keep_filter");
+ assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class));
+ String source = "Hello small world";
+ String[] expected = new String[]{"Hello"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected, new int[]{1});
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java
new file mode 100644
index 0000000..9d43d7e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+public class LimitTokenCountFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
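+ // The limit filter defaults to max_token_count=1; consume_all_tokens only controls
+ // whether the rest of the stream is still read, not which tokens are emitted.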
+ @Test
+ public void testDefault() throws IOException {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.limit_default.type", "limit").build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ {
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_default");
+ String source = "the quick brown fox";
+ String[] expected = new String[] { "the" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit");
+ String source = "the quick brown fox";
+ String[] expected = new String[] { "the" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+
+ @Test
+ public void testSettings() throws IOException {
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.limit_1.type", "limit")
+ .put("index.analysis.filter.limit_1.max_token_count", 3).put("index.analysis.filter.limit_1.consume_all_tokens", true)
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
+ String source = "the quick brown fox";
+ String[] expected = new String[] { "the", "quick", "brown" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.limit_1.type", "limit")
+ .put("index.analysis.filter.limit_1.max_token_count", 3).put("index.analysis.filter.limit_1.consume_all_tokens", false)
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
+ String source = "the quick brown fox";
+ String[] expected = new String[] { "the", "quick", "brown" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.limit_1.type", "limit")
+ .put("index.analysis.filter.limit_1.max_token_count", 17).put("index.analysis.filter.limit_1.consume_all_tokens", true)
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
+ String source = "the quick brown fox";
+ String[] expected = new String[] { "the", "quick", "brown", "fox" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java
new file mode 100644
index 0000000..c17e3e3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java
@@ -0,0 +1,230 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.ngram.*;
+import org.apache.lucene.analysis.reverse.ReverseStringFilter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+public class NGramTokenizerFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test
+ public void testParseTokenChars() {
+ final Index index = new Index("test");
+ final String name = "ngr";
+ final Settings indexSettings = ImmutableSettings.EMPTY;
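+ // These are not valid character classes, so creating the tokenizer must fail.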
+ for (String tokenChars : Arrays.asList("letters", "number", "DIRECTIONALITY_UNDEFINED")) {
+ final Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", tokenChars).build();
+ try {
+ new NGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(""));
+ fail("expected an exception for invalid token_chars [" + tokenChars + "]");
+ } catch (ElasticsearchIllegalArgumentException expected) {
+ // OK
+ }
+ }
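+ // Valid classes are parsed case-insensitively and with surrounding whitespace trimmed.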
+ for (String tokenChars : Arrays.asList("letter", " digit ", "punctuation", "DIGIT", "CoNtRoL", "dash_punctuation")) {
+ final Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", tokenChars).build();
+ new NGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(""));
+ // no exception
+ }
+ }
+
+ @Test
+ public void testPreTokenization() throws IOException {
+ // Make sure that pre-tokenization works and that token_chars classes also match supplementary (non-BMP) characters
+ final Index index = new Index("test");
+ final String name = "ngr";
+ final Settings indexSettings = ImmutableSettings.EMPTY;
+ Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit").build();
+ assertTokenStreamContents(new NGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader("Åbc déf g\uD801\uDC00f ")),
+ new String[] {"Åb", "Åbc", "bc", "dé", "déf", "éf", "g\uD801\uDC00", "g\uD801\uDC00f", "\uD801\uDC00f"});
+ settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit,punctuation,whitespace,symbol").build();
+ assertTokenStreamContents(new NGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(" a!$ 9")),
+ new String[] {" a", " a!", "a!", "a!$", "!$", "!$ ", "$ ", "$ 9", " 9"});
+ }
+
+ @Test
+ public void testPreTokenizationEdge() throws IOException {
+ // Make sure that pre-tokenization works and that token_chars classes also match supplementary (non-BMP) characters
+ final Index index = new Index("test");
+ final String name = "ngr";
+ final Settings indexSettings = ImmutableSettings.EMPTY;
+ Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit").build();
+ assertTokenStreamContents(new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader("Åbc déf g\uD801\uDC00f ")),
+ new String[] {"Åb", "Åbc", "dé", "déf", "g\uD801\uDC00", "g\uD801\uDC00f"});
+ settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit,punctuation,whitespace,symbol").build();
+ assertTokenStreamContents(new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(" a!$ 9")),
+ new String[] {" a", " a!"});
+ }
+
+ @Test
+ public void testBackwardsCompatibilityEdgeNgramTokenizer() throws IllegalArgumentException, IllegalAccessException {
+ int iters = atLeast(20);
+ final Index index = new Index("test");
+ final String name = "ngr";
+ for (int i = 0; i < iters; i++) {
+ Version v = randomVersion(random());
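+ // From 0.90.2 on, the Lucene 4.3 tokenizer is only expected when an explicit 4.x
+ // compatibility version is requested; indices created before 0.90.2 always get it.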
+ if (v.onOrAfter(Version.V_0_90_2)) {
+ Builder builder = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit");
+ boolean compatVersion = false;
+ if ((compatVersion = random().nextBoolean())) {
+ builder.put("version", "4." + random().nextInt(3));
+ builder.put("side", "back");
+ }
+ Settings settings = builder.build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer edgeNGramTokenizer = new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(
+ "foo bar"));
+ if (compatVersion) {
+ assertThat(edgeNGramTokenizer, instanceOf(Lucene43EdgeNGramTokenizer.class));
+ } else {
+ assertThat(edgeNGramTokenizer, instanceOf(EdgeNGramTokenizer.class));
+ }
+
+ } else {
+ Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("side", "back").build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer edgeNGramTokenizer = new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(
+ "foo bar"));
+ assertThat(edgeNGramTokenizer, instanceOf(Lucene43EdgeNGramTokenizer.class));
+ }
+ }
+ Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("side", "back").build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
+ try {
+ new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader("foo bar"));
+ fail("should fail side:back is not supported anymore");
+ } catch (ElasticsearchIllegalArgumentException ex) {
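+ // expected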
+ }
+
+ }
+
+ @Test
+ public void testBackwardsCompatibilityNgramTokenizer() throws IllegalArgumentException, IllegalAccessException {
+ int iters = atLeast(20);
+ for (int i = 0; i < iters; i++) {
+ final Index index = new Index("test");
+ final String name = "ngr";
+ Version v = randomVersion(random());
+ if (v.onOrAfter(Version.V_0_90_2)) {
+ Builder builder = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit");
+ boolean compatVersion = false;
+ if ((compatVersion = random().nextBoolean())) {
+ builder.put("version", "4." + random().nextInt(3));
+ }
+ Settings settings = builder.build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer nGramTokenizer = new NGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(
+ "foo bar"));
+ if (compatVersion) {
+ assertThat(nGramTokenizer, instanceOf(Lucene43NGramTokenizer.class));
+ } else {
+ assertThat(nGramTokenizer, instanceOf(NGramTokenizer.class));
+ }
+
+ } else {
+ Settings settings = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3).build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer nGramTokenizer = new NGramTokenizerFactory(index, indexSettings, name, settings).create(new StringReader(
+ "foo bar"));
+ assertThat(nGramTokenizer, instanceOf(Lucene43NGramTokenizer.class));
+ }
+ }
+ }
+
+ @Test
+ public void testBackwardsCompatibilityEdgeNgramTokenFilter() throws IllegalArgumentException, IllegalAccessException {
+ int iters = atLeast(20);
+ for (int i = 0; i < iters; i++) {
+ final Index index = new Index("test");
+ final String name = "ngr";
+ Version v = randomVersion(random());
+ if (v.onOrAfter(Version.V_0_90_2)) {
+ Builder builder = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3);
+ boolean compatVersion = false;
+ if ((compatVersion = random().nextBoolean())) {
+ builder.put("version", "4." + random().nextInt(3));
+ }
+ boolean reverse = random().nextBoolean();
+ if (reverse) {
+ builder.put("side", "back");
+ }
+ Settings settings = builder.build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ TokenStream edgeNGramTokenFilter = new EdgeNGramTokenFilterFactory(index, indexSettings, name, settings).create(new MockTokenizer(new StringReader(
+ "foo bar")));
+ if (compatVersion) {
+ assertThat(edgeNGramTokenFilter, instanceOf(EdgeNGramTokenFilter.class));
+ } else if (reverse && !compatVersion){
+ assertThat(edgeNGramTokenFilter, instanceOf(ReverseStringFilter.class));
+ } else {
+ assertThat(edgeNGramTokenFilter, instanceOf(EdgeNGramTokenFilter.class));
+ }
+
+ } else {
+ Builder builder = ImmutableSettings.builder().put("min_gram", 2).put("max_gram", 3);
+ boolean reverse = random().nextBoolean();
+ if (reverse) {
+ builder.put("side", "back");
+ }
+ Settings settings = builder.build();
+ Settings indexSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ TokenStream edgeNGramTokenFilter = new EdgeNGramTokenFilterFactory(index, indexSettings, name, settings).create(new MockTokenizer(new StringReader(
+ "foo bar")));
+ assertThat(edgeNGramTokenFilter, instanceOf(EdgeNGramTokenFilter.class));
+ }
+ }
+ }
+
+
+ private Version randomVersion(Random random) throws IllegalArgumentException, IllegalAccessException {
+ Field[] declaredFields = Version.class.getDeclaredFields();
+ List<Field> versionFields = new ArrayList<Field>();
+ for (Field field : declaredFields) {
+ if ((field.getModifiers() & Modifier.STATIC) != 0 && field.getName().startsWith("V_") && field.getType() == Version.class) {
+ versionFields.add(field);
+ }
+ }
+ return (Version) versionFields.get(random.nextInt(versionFields.size())).get(Version.class);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java b/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java
new file mode 100644
index 0000000..7f7b363
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.NumericTokenStream.NumericTermAttribute;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+public class NumericAnalyzerTests extends ElasticsearchTestCase {
+
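+ // Checks that NumericDoubleAnalyzer emits the same terms, shifts and position
+ // increments as a plain NumericTokenStream configured with the same precision step.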
+ @Test
+ public void testAttributeEqual() throws IOException {
+ final int precisionStep = 8;
+ final double value = randomDouble();
+ NumericDoubleAnalyzer analyzer = new NumericDoubleAnalyzer(precisionStep);
+
+ final TokenStream ts1 = analyzer.tokenStream("dummy", String.valueOf(value));
+ final NumericTokenStream ts2 = new NumericTokenStream(precisionStep);
+ ts2.setDoubleValue(value);
+ final NumericTermAttribute numTerm1 = ts1.addAttribute(NumericTermAttribute.class);
+ final NumericTermAttribute numTerm2 = ts2.addAttribute(NumericTermAttribute.class);
+ final PositionIncrementAttribute posInc1 = ts1.addAttribute(PositionIncrementAttribute.class);
+ final PositionIncrementAttribute posInc2 = ts2.addAttribute(PositionIncrementAttribute.class);
+ ts1.reset();
+ ts2.reset();
+ while (ts1.incrementToken()) {
+ assertThat(ts2.incrementToken(), is(true));
+ assertThat(posInc1, equalTo(posInc2));
+ // can't use equalTo directly on the numeric attribute because it doesn't implement equals (LUCENE-5070)
+ assertThat(numTerm1.getRawValue(), equalTo(numTerm2.getRawValue()));
+ assertThat(numTerm1.getShift(), equalTo(numTerm2.getShift()));
+ }
+ assertThat(ts2.incrementToken(), is(false));
+ ts1.end();
+ ts2.end();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java b/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java
new file mode 100644
index 0000000..20f5474
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+public class PatternCaptureTokenFilterTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test
+ public void testPatternCaptureTokenFilter() throws Exception {
+ Index index = new Index("test");
+ Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/pattern_capture.json").build();
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
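+ // "single" and "multi" keep the original token and add each capture group;
+ // "preserve" drops the original (presumably preserve_original=false in pattern_capture.json)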
+ NamedAnalyzer analyzer1 = analysisService.analyzer("single");
+
+ assertTokenStreamContents(analyzer1.tokenStream("test", "foobarbaz"), new String[]{"foobarbaz","foobar","foo"});
+
+ NamedAnalyzer analyzer2 = analysisService.analyzer("multi");
+
+ assertTokenStreamContents(analyzer2.tokenStream("test", "abc123def"), new String[]{"abc123def","abc","123","def"});
+
+ NamedAnalyzer analyzer3 = analysisService.analyzer("preserve");
+
+ assertTokenStreamContents(analyzer3.tokenStream("test", "foobarbaz"), new String[]{"foobar","foo"});
+ }
+
+
+ @Test(expected=ElasticsearchIllegalArgumentException.class)
+ public void testNoPatterns() {
+ new PatternCaptureGroupTokenFilterFactory(new Index("test"), settingsBuilder().build(), "pattern_capture", settingsBuilder().put("pattern", "foobar").build());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java
new file mode 100644
index 0000000..f9dc905
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
+
+/**
+ *
+ */
+public class PreBuiltAnalyzerProviderFactoryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testVersioningInFactoryProvider() throws Exception {
+ PreBuiltAnalyzerProviderFactory factory = new PreBuiltAnalyzerProviderFactory("default", AnalyzerScope.INDEX, PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT));
+
+ AnalyzerProvider currentAnalyzerProvider = factory.create("default", ImmutableSettings.Builder.EMPTY_SETTINGS);
+ AnalyzerProvider former090AnalyzerProvider = factory.create("default", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build());
+ AnalyzerProvider currentAnalyzerProviderReference = factory.create("default", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ // would love to access the version inside of the lucene analyzer, but that is not possible...
+ assertThat(currentAnalyzerProvider, is(currentAnalyzerProviderReference));
+ assertThat(currentAnalyzerProvider, is(not(former090AnalyzerProvider)));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
new file mode 100644
index 0000000..7719df6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class PreBuiltAnalyzerTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testThatDefaultAndStandardAnalyzerAreTheSameInstance() {
+ Analyzer currentStandardAnalyzer = PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT);
+ Analyzer currentDefaultAnalyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(Version.CURRENT);
+
+ // special case, these two are the same instance
+ assertThat(currentDefaultAnalyzer, is(currentStandardAnalyzer));
+ }
+
+ @Test
+ public void testThatDefaultAndStandardAnalyzerChangedIn10Beta1() throws IOException {
+ Analyzer currentStandardAnalyzer = PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_1_0_0_Beta1);
+ Analyzer currentDefaultAnalyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(Version.V_1_0_0_Beta1);
+
+ // special case, these two are the same instance
+ assertThat(currentDefaultAnalyzer, is(currentStandardAnalyzer));
+ final int n = atLeast(10);
+ Version version = Version.CURRENT;
+ for(int i = 0; i < n; i++) {
+ if (version.equals(Version.V_1_0_0_Beta1)) {
+ assertThat(currentDefaultAnalyzer, is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version)));
+ } else {
+ assertThat(currentDefaultAnalyzer, not(is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version))));
+ }
+ Analyzer analyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(version);
+ TokenStream ts = analyzer.tokenStream("foo", "This is it Dude");
+ ts.reset();
+ CharTermAttribute charTermAttribute = ts.addAttribute(CharTermAttribute.class);
+ List<String> list = new ArrayList<String>();
+ while(ts.incrementToken()) {
+ list.add(charTermAttribute.toString());
+ }
+ if (version.onOrAfter(Version.V_1_0_0_Beta1)) {
+ assertThat(list.size(), is(4));
+ assertThat(list, contains("this", "is", "it", "dude"));
+
+ } else {
+ assertThat(list.size(), is(1));
+ assertThat(list, contains("dude"));
+ }
+ ts.close();
+ version = randomVersion();
+ }
+ }
+
+ @Test
+ public void testAnalyzerChangedIn10RC1() throws IOException {
+ Analyzer pattern = PreBuiltAnalyzers.PATTERN.getAnalyzer(Version.V_1_0_0_RC1);
+ Analyzer standardHtml = PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(Version.V_1_0_0_RC1);
+ final int n = atLeast(10);
+ Version version = Version.CURRENT;
+ for(int i = 0; i < n; i++) {
+ if (version.equals(Version.V_1_0_0_RC1)) {
+ assertThat(pattern, is(PreBuiltAnalyzers.PATTERN.getAnalyzer(version)));
+ assertThat(standardHtml, is(PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(version)));
+ } else {
+ assertThat(pattern, not(is(PreBuiltAnalyzers.PATTERN.getAnalyzer(version))));
+ assertThat(standardHtml, not(is(PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(version))));
+ }
+ Analyzer analyzer = randomBoolean() ? PreBuiltAnalyzers.PATTERN.getAnalyzer(version) : PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(version);
+ TokenStream ts = analyzer.tokenStream("foo", "This is it Dude");
+ ts.reset();
+ CharTermAttribute charTermAttribute = ts.addAttribute(CharTermAttribute.class);
+ List<String> list = new ArrayList<String>();
+ while(ts.incrementToken()) {
+ list.add(charTermAttribute.toString());
+ }
+ if (version.onOrAfter(Version.V_1_0_0_RC1)) {
+ assertThat(list.toString(), list.size(), is(4));
+ assertThat(list, contains("this", "is", "it", "dude"));
+
+ } else {
+ assertThat(list.size(), is(1));
+ assertThat(list, contains("dude"));
+ }
+ ts.close();
+ version = randomVersion();
+ }
+ }
+
+ @Test
+ public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() {
+ assertThat(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT),
+ is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_0_18_0)));
+ }
+
+ @Test
+ public void testThatInstancesAreCachedAndReused() {
+ assertThat(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT),
+ is(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT)));
+ assertThat(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_0_18_0),
+ is(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_0_18_0)));
+ }
+
+ @Test
+ public void testThatInstancesWithSameLuceneVersionAreReused() {
+ // both are lucene 4.4 and should return the same instance
+ assertThat(PreBuiltAnalyzers.CATALAN.getAnalyzer(Version.V_0_90_4),
+ is(PreBuiltAnalyzers.CATALAN.getAnalyzer(Version.V_0_90_5)));
+ }
+
+ @Test
+ public void testThatAnalyzersAreUsedInMapping() throws IOException {
+ int randomInt = randomInt(PreBuiltAnalyzers.values().length-1);
+ PreBuiltAnalyzers randomPreBuiltAnalyzer = PreBuiltAnalyzers.values()[randomInt];
+ String analyzerName = randomPreBuiltAnalyzer.name().toLowerCase(Locale.ROOT);
+
+ Version randomVersion = randomVersion();
+ Settings indexSettings = ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion).build();
+
+ NamedAnalyzer namedAnalyzer = new PreBuiltAnalyzerProvider(analyzerName, AnalyzerScope.INDEX, randomPreBuiltAnalyzer.getAnalyzer(randomVersion)).get();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("analyzer", analyzerName).endObject().endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser(indexSettings).parse(mapping);
+
+ FieldMapper fieldMapper = docMapper.mappers().name("field").mapper();
+ assertThat(fieldMapper.searchAnalyzer(), instanceOf(NamedAnalyzer.class));
+ NamedAnalyzer fieldMapperNamedAnalyzer = (NamedAnalyzer) fieldMapper.searchAnalyzer();
+
+ assertThat(fieldMapperNamedAnalyzer.analyzer(), is(namedAnalyzer.analyzer()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java
new file mode 100644
index 0000000..d7d5f71
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.indices.analysis.PreBuiltCharFilters;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+/**
+ * Tests that {@link PreBuiltCharFilterFactoryFactory} shares pre-built char filter instances across versions.
+ */
+public class PreBuiltCharFilterFactoryFactoryTests {
+
+ @Test
+ public void testThatDifferentVersionsCanBeLoaded() {
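+ // html_strip is expected to be version-independent: every requested version yields the same factory instance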
+ PreBuiltCharFilterFactoryFactory factory = new PreBuiltCharFilterFactoryFactory(PreBuiltCharFilters.HTML_STRIP.getCharFilterFactory(Version.CURRENT));
+
+ CharFilterFactory emptySettingsCharFilterFactory = factory.create("html_strip", ImmutableSettings.EMPTY);
+ CharFilterFactory former090CharFilterFactory = factory.create("html_strip", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build());
+ CharFilterFactory former090CharFilterFactoryCopy = factory.create("html_strip", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build());
+ CharFilterFactory currentCharFilterFactory = factory.create("html_strip", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ assertThat(emptySettingsCharFilterFactory, is(currentCharFilterFactory));
+ assertThat(emptySettingsCharFilterFactory, is(former090CharFilterFactory));
+ assertThat(emptySettingsCharFilterFactory, is(former090CharFilterFactoryCopy));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java
new file mode 100644
index 0000000..a3c45d6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.indices.analysis.PreBuiltTokenFilters;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.*;
+
+/**
+ * Tests version-based caching of pre-built token filter factories.
+ */
+public class PreBuiltTokenFilterFactoryFactoryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testThatCachingWorksForCachingStrategyOne() {
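+ // caching strategy ONE: a single instance is shared across all versions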
+ PreBuiltTokenFilterFactoryFactory factory = new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.WORD_DELIMITER.getTokenFilterFactory(Version.CURRENT));
+
+ TokenFilterFactory emptySettingsTokenFilterFactory = factory.create("word_delimiter", ImmutableSettings.EMPTY);
+ TokenFilterFactory former090TokenFilterFactory = factory.create("word_delimiter", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1).build());
+ TokenFilterFactory former090TokenFilterFactoryCopy = factory.create("word_delimiter", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2).build());
+ TokenFilterFactory currentTokenFilterFactory = factory.create("word_delimiter", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ assertThat(emptySettingsTokenFilterFactory, is(currentTokenFilterFactory));
+ assertThat(emptySettingsTokenFilterFactory, is(former090TokenFilterFactory));
+ assertThat(emptySettingsTokenFilterFactory, is(former090TokenFilterFactoryCopy));
+ }
+
+ @Test
+ public void testThatDifferentVersionsCanBeLoaded() {
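+ // the stop filter is cached per Lucene version: 0.90.1 and 0.90.2 share one instance, distinct from the current one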
+ PreBuiltTokenFilterFactoryFactory factory = new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.STOP.getTokenFilterFactory(Version.CURRENT));
+
+ TokenFilterFactory emptySettingsTokenFilterFactory = factory.create("stop", ImmutableSettings.EMPTY);
+ TokenFilterFactory former090TokenFilterFactory = factory.create("stop", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1).build());
+ TokenFilterFactory former090TokenFilterFactoryCopy = factory.create("stop", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2).build());
+ TokenFilterFactory currentTokenFilterFactory = factory.create("stop", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ assertThat(emptySettingsTokenFilterFactory, is(currentTokenFilterFactory));
+ assertThat(emptySettingsTokenFilterFactory, is(not(former090TokenFilterFactory)));
+ assertThat(former090TokenFilterFactory, is(former090TokenFilterFactoryCopy));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java
new file mode 100644
index 0000000..279bf7b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.indices.analysis.PreBuiltTokenizers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.*;
+
+/**
+ * Tests version-based caching of pre-built tokenizer factories.
+ */
+public class PreBuiltTokenizerFactoryFactoryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testThatDifferentVersionsCanBeLoaded() {
+ PreBuiltTokenizerFactoryFactory factory = new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.STANDARD.getTokenizerFactory(Version.CURRENT));
+
+ TokenizerFactory emptySettingsTokenizerFactory = factory.create("standard", ImmutableSettings.EMPTY);
+ // different es versions, same lucene version, thus cached
+ TokenizerFactory former090TokenizerFactory = factory.create("standard", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1).build());
+ TokenizerFactory former090TokenizerFactoryCopy = factory.create("standard", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2).build());
+ TokenizerFactory currentTokenizerFactory = factory.create("standard", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ assertThat(emptySettingsTokenizerFactory, is(currentTokenizerFactory));
+ assertThat(emptySettingsTokenizerFactory, is(not(former090TokenizerFactory)));
+ assertThat(emptySettingsTokenizerFactory, is(not(former090TokenizerFactoryCopy)));
+ assertThat(former090TokenizerFactory, is(former090TokenizerFactoryCopy));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java
new file mode 100644
index 0000000..6cc06be
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+@ThreadLeakScope(Scope.NONE)
+public class ShingleTokenFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ private static final String RESOURCE = "org/elasticsearch/index/analysis/shingle_analysis.json";
+
+ @Test
+ public void testDefault() throws IOException {
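+ // the default shingle filter emits unigrams plus bigrams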
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle");
+ String source = "the quick brown fox";
+ String[] expected = new String[]{"the", "the quick", "quick", "quick brown", "brown", "brown fox", "fox"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testInverseMapping() throws IOException {
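+ // min/max shingle size 3 with unigrams disabled yields only trigrams, joined by the configured "_" separator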
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_inverse");
+ assertThat(tokenFilter, instanceOf(ShingleTokenFilterFactory.class));
+ String source = "the quick brown fox";
+ String[] expected = new String[]{"the_quick_brown", "quick_brown_fox"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testInverseMappingNoShingles() throws IOException {
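+ // only two input tokens, so no trigram can be built and output_unigrams_if_no_shingles kicks in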
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_inverse");
+ assertThat(tokenFilter, instanceOf(ShingleTokenFilterFactory.class));
+ String source = "the quick";
+ String[] expected = new String[]{"the", "quick"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java b/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java
new file mode 100644
index 0000000..daf0de9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+public class StopAnalyzerTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test
+ public void testDefaultsCompoundAnalysis() throws Exception {
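+ // both stopword notations (array and plain string) must be accepted; the input is all English stopwords, so nothing is emitted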
+ Index index = new Index("test");
+ Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/stop.json").build();
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+ NamedAnalyzer analyzer1 = analysisService.analyzer("analyzer1");
+
+ assertTokenStreamContents(analyzer1.tokenStream("test", "to be or not to be"), new String[0]);
+
+ NamedAnalyzer analyzer2 = analysisService.analyzer("analyzer2");
+
+ assertTokenStreamContents(analyzer2.tokenStream("test", "to be or not to be"), new String[0]);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java b/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java
new file mode 100644
index 0000000..d2a1727
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.StopFilter;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.inject.ProvisionException;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class StopTokenFilterTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test(expected = ProvisionException.class)
+ public void testPositionIncrementSetting() throws IOException {
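+ // disabling position increments is no longer supported (Lucene >= 4.4), so building the filter must fail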
+ Builder builder = ImmutableSettings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop")
+ .put("index.analysis.filter.my_stop.enable_position_increments", false);
+ if (random().nextBoolean()) {
+ builder.put("index.analysis.filter.my_stop.version", "4.4");
+ }
+ Settings settings = builder.build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ analysisService.tokenFilter("my_stop");
+ }
+
+ @Test
+ public void testCorrectPositionIncrementSetting() throws IOException {
+ Builder builder = ImmutableSettings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop");
+ if (random().nextBoolean()) {
+ builder.put("index.analysis.filter.my_stop.enable_position_increments", true);
+ }
+ if (random().nextBoolean()) {
+ builder.put("index.analysis.filter.my_stop.version", Version.values()[random().nextInt(Version.values().length)]);
+ }
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(builder.build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop");
+ assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class));
+ TokenStream create = tokenFilter.create(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("foo bar")));
+ assertThat(create, instanceOf(StopFilter.class));
+ assertThat(((StopFilter)create).getEnablePositionIncrements(), equalTo(true));
+ }
+
+ @Test
+ public void testDeprecatedPositionIncrementSettingWithVersions() throws IOException {
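+ // with an explicit pre-4.4 version the deprecated setting is still honored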
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop")
+ .put("index.analysis.filter.my_stop.enable_position_increments", false).put("index.analysis.filter.my_stop.version", "4.3")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop");
+ assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class));
+ TokenStream create = tokenFilter.create(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("foo bar")));
+ assertThat(create, instanceOf(StopFilter.class));
+ assertThat(((StopFilter)create).getEnablePositionIncrements(), equalTo(false));
+ }
+
+ @Test
+ public void testThatSuggestStopFilterWorks() throws Exception {
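+ // remove_trailing=false switches the implementation to Lucene's SuggestStopFilter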
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.analysis.filter.my_stop.type", "stop")
+ .put("index.analysis.filter.my_stop.remove_trailing", false)
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop");
+ assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class));
+ TokenStream create = tokenFilter.create(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("foo an")));
+ assertThat(create, instanceOf(SuggestStopFilter.class));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java
new file mode 100644
index 0000000..2320c22
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+public class WordDelimiterTokenFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test
+ public void testDefault() throws IOException {
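+ // by default the filter splits on case changes, letter/digit transitions and delimiters, and stems the English possessive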
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"Power", "Shot", "500", "42", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testCatenateWords() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.catenate_words", "true")
+ .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"PowerShot", "500", "42", "wifi", "wifi", "4000", "j", "2", "se", "ONeil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testCatenateNumbers() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
+ .put("index.analysis.filter.my_word_delimiter.catenate_numbers", "true")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"Power", "Shot", "50042", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testCatenateAll() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
+ .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
+ .put("index.analysis.filter.my_word_delimiter.catenate_all", "true")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"PowerShot", "50042", "wifi", "wifi4000", "j2se", "ONeil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testSplitOnCaseChange() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.split_on_case_change", "false")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot";
+ String[] expected = new String[]{"PowerShot"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testPreserveOriginal() throws IOException {
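+ // preserve_original additionally emits each unsplit token before its parts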
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.preserve_original", "true")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"PowerShot", "Power", "Shot", "500-42", "500", "42", "wi-fi", "wi", "fi", "wi-fi-4000", "wi", "fi", "4000", "j2se", "j", "2", "se", "O'Neil's", "O", "Neil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testStemEnglishPossessive() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.stem_english_possessive", "false")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"Power", "Shot", "500", "42", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil", "s"};
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/cjk_analysis.json b/src/test/java/org/elasticsearch/index/analysis/cjk_analysis.json
new file mode 100644
index 0000000..89a1281
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/cjk_analysis.json
@@ -0,0 +1,37 @@
+{
+ "index":{
+ "analysis":{
+ "filter":{
+ "cjk_all_flags":{
+ "type":"cjk_bigram",
+ "output_unigrams":true,
+ "ignored_scripts":[
+ "han",
+ "hiragana",
+ "katakana",
+ "hangul",
+ "foobar"
+ ]
+ },
+ "cjk_han_only":{
+ "type":"cjk_bigram",
+ "output_unigrams":false,
+ "ignored_scripts":[
+ "hiragana"
+ ]
+ },
+ "cjk_han_unigram_only":{
+ "type":"cjk_bigram",
+ "output_unigrams":true,
+ "ignored_scripts":[
+ "hiragana"
+ ]
+ },
+ "cjk_no_flags":{
+ "type":"cjk_bigram",
+ "output_unigrams":false
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java
new file mode 100644
index 0000000..dcbc026
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis.commongrams;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.AnalysisTestsHelper;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test
+ public void testDefault() throws IOException {
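+ // common_grams without common_words or common_words_path must be rejected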
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_default.type", "common_grams").build();
+
+ try {
+ AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Assert.fail("[common_words] or [common_words_path] is set");
+ } catch (Exception e) {
+ assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class));
+ }
+ }
+
+ @Test
+ public void testWithoutCommonWordsMatch() throws IOException {
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_default.type", "common_grams")
+ .putArray("index.analysis.filter.common_grams_default.common_words", "chromosome", "protein")
+ .build();
+
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ {
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_default");
+ String source = "the quick brown is a fox Or noT";
+ String[] expected = new String[] { "the", "quick", "brown", "is", "a", "fox", "Or", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_default.type", "common_grams")
+ .put("index.analysis.filter.common_grams_default.query_mode", false)
+ .putArray("index.analysis.filter.common_grams_default.common_words", "chromosome", "protein")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ {
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_default");
+ String source = "the quick brown is a fox Or noT";
+ String[] expected = new String[] { "the", "quick", "brown", "is", "a", "fox", "Or", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+ }
+
+ @Test
+ public void testSettings() throws IOException {
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_1.type", "common_grams")
+ .put("index.analysis.filter.common_grams_1.ignore_case", true)
+ .putArray("index.analysis.filter.common_grams_1.common_words", "the", "Or", "Not", "a", "is", "an", "they", "are")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_1");
+ String source = "the quick brown is a fox or noT";
+ String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "fox_or", "or", "or_noT", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_2.type", "common_grams")
+ .put("index.analysis.filter.common_grams_2.ignore_case", false)
+ .putArray("index.analysis.filter.common_grams_2.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_2");
+ String source = "the quick brown is a fox or why noT";
+ String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "or", "why", "why_noT", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_3.type", "common_grams")
+ .putArray("index.analysis.filter.common_grams_3.common_words", "the", "or", "not", "a", "is", "an", "they", "are")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_3");
+ String source = "the quick brown is a fox Or noT";
+ String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "Or", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+
+ @Test
+ public void testCommonGramsAnalysis() throws IOException {
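+ // inline and file-based common word lists should produce identical analysis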
+ Settings settings = ImmutableSettings.settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/commongrams/commongrams.json").build();
+ {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer").analyzer();
+ String source = "the quick brown is a fox or not";
+ String[] expected = new String[] { "the", "quick", "quick_brown", "brown", "brown_is", "is", "a", "a_fox", "fox", "fox_or", "or", "not" };
+ assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
+ }
+ {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer_file").analyzer();
+ String source = "the quick brown is a fox or not";
+ String[] expected = new String[] { "the", "quick", "quick_brown", "brown", "brown_is", "is", "a", "a_fox", "fox", "fox_or", "or", "not" };
+ assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
+ }
+ }
+
+ @Test
+ public void testQueryModeSettings() throws IOException {
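+ // in query mode the standalone forms of the common words are dropped, leaving only the grams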
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_1.type", "common_grams")
+ .put("index.analysis.filter.common_grams_1.query_mode", true)
+ .putArray("index.analysis.filter.common_grams_1.common_words", "the", "Or", "Not", "a", "is", "an", "they", "are")
+ .put("index.analysis.filter.common_grams_1.ignore_case", true)
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_1");
+ String source = "the quick brown is a fox or noT";
+ String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox_or", "or_noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_2.type", "common_grams")
+ .put("index.analysis.filter.common_grams_2.query_mode", true)
+ .putArray("index.analysis.filter.common_grams_2.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are")
+ .put("index.analysis.filter.common_grams_2.ignore_case", false)
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_2");
+ String source = "the quick brown is a fox or why noT";
+ String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "or", "why_noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_3.type", "common_grams")
+ .put("index.analysis.filter.common_grams_3.query_mode", true)
+ .putArray("index.analysis.filter.common_grams_3.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_3");
+ String source = "the quick brown is a fox or why noT";
+ String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "or", "why_noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_4.type", "common_grams")
+ .put("index.analysis.filter.common_grams_4.query_mode", true)
+ .putArray("index.analysis.filter.common_grams_4.common_words", "the", "or", "not", "a", "is", "an", "they", "are")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_4");
+ String source = "the quick brown is a fox Or noT";
+ String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "Or", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+
+ @Test
+ public void testQueryModeCommonGramsAnalysis() throws IOException {
+ Settings settings = ImmutableSettings.settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json").build();
+ {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer").analyzer();
+ String source = "the quick brown is a fox or not";
+ String[] expected = new String[] { "the", "quick_brown", "brown_is", "is", "a_fox", "fox_or", "or", "not" };
+ assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
+ }
+ {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer_file").analyzer();
+ String source = "the quick brown is a fox or not";
+ String[] expected = new String[] { "the", "quick_brown", "brown_is", "is", "a_fox", "fox_or", "or", "not" };
+ assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/commongrams/common_words.txt b/src/test/java/org/elasticsearch/index/analysis/commongrams/common_words.txt
new file mode 100644
index 0000000..f97b799
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/commongrams/common_words.txt
@@ -0,0 +1,2 @@
+brown
+fox
diff --git a/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams.json b/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams.json
new file mode 100644
index 0000000..6db49fc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams.json
@@ -0,0 +1,29 @@
+{
+ "index":{
+ "analysis":{
+ "analyzer":{
+ "commongramsAnalyzer":{
+ "tokenizer":"whitespace",
+ "filter":[ "common_grams" ]
+ },
+ "commongramsAnalyzer_file":{
+ "tokenizer":"whitespace",
+ "filter":[ "common_grams_file" ]
+ }
+ },
+ "filter":{
+ "common_grams":{
+ "type":"common_grams",
+ "common_words":[
+ "brown",
+ "fox"
+ ]
+ },
+ "common_grams_file":{
+ "type":"common_grams",
+ "common_words_path":"org/elasticsearch/index/analysis/commongrams/common_words.txt"
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json b/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json
new file mode 100644
index 0000000..6f0c015
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json
@@ -0,0 +1,31 @@
+{
+ "index":{
+ "analysis":{
+ "analyzer":{
+ "commongramsAnalyzer":{
+ "tokenizer":"whitespace",
+ "filter":[ "common_grams" ]
+ },
+ "commongramsAnalyzer_file":{
+ "tokenizer":"whitespace",
+ "filter":[ "common_grams_file" ]
+ }
+ },
+ "filter":{
+ "common_grams":{
+ "type":"common_grams",
+ "query_mode" : true,
+ "common_words":[
+ "brown",
+ "fox"
+ ]
+ },
+ "common_grams_file":{
+ "type":"common_grams",
+ "query_mode" : true,
+ "common_words_path":"org/elasticsearch/index/analysis/commongrams/common_words.txt"
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java b/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java
new file mode 100644
index 0000000..a30d520
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis.filter1;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.apache.lucene.analysis.core.StopFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
+import org.elasticsearch.index.settings.IndexSettings;
+
+public class MyFilterTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ @Inject
+ public MyFilterTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, String name) {
+ super(index, indexSettings, name, ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new StopFilter(version, tokenStream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/analysis/keep_analysis.json b/src/test/java/org/elasticsearch/index/analysis/keep_analysis.json
new file mode 100644
index 0000000..233d6f3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/keep_analysis.json
@@ -0,0 +1,19 @@
+{
+ "index":{
+ "analysis":{
+ "filter":{
+ "my_keep_filter":{
+ "type":"keep",
+ "keep_words" : ["Hello", "worlD"],
+ "keep_words_case" : true
+ },
+ "my_case_sensitive_keep_filter":{
+ "type":"keep",
+ "keep_words" : ["Hello", "worlD"],
+ "enable_position_increments" : false,
+ "version" : "4.2"
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/pattern_capture.json b/src/test/java/org/elasticsearch/index/analysis/pattern_capture.json
new file mode 100644
index 0000000..d82fb98
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/pattern_capture.json
@@ -0,0 +1,46 @@
+{
+ "index": {
+ "number_of_shards": 1,
+ "number_of_replicas": 0,
+ "analysis": {
+ "filter": {
+ "single": {
+ "type": "pattern_capture",
+ "patterns": "((...)...)"
+ },
+ "multi": {
+ "type": "pattern_capture",
+ "patterns": [
+ "(\\d+)",
+ "([a-z]+)"
+ ]
+ },
+ "preserve": {
+ "type": "pattern_capture",
+ "preserve_original": false,
+ "patterns": "((...)...)"
+ }
+ },
+ "analyzer": {
+ "single": {
+ "tokenizer": "keyword",
+ "filter": [
+ "single"
+ ]
+ },
+ "multi": {
+ "tokenizer": "keyword",
+ "filter": [
+ "multi"
+ ]
+ },
+ "preserve": {
+ "tokenizer": "keyword",
+ "filter": [
+ "preserve"
+ ]
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/analysis/shingle_analysis.json b/src/test/java/org/elasticsearch/index/analysis/shingle_analysis.json
new file mode 100644
index 0000000..c469a4a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/shingle_analysis.json
@@ -0,0 +1,16 @@
+{
+ "index":{
+ "analysis":{
+ "filter":{
+ "shingle_inverse":{
+ "type":"shingle",
+ "max_shingle_size" : 3,
+ "min_shingle_size" : 3,
+ "output_unigrams" : false,
+ "output_unigrams_if_no_shingles" : true,
+ "token_separator" : "_"
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/analysis/stop.json b/src/test/java/org/elasticsearch/index/analysis/stop.json
new file mode 100644
index 0000000..717c9fd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/stop.json
@@ -0,0 +1,18 @@
+{
+ "index":{
+ "number_of_shards":1,
+ "number_of_replicas":0,
+ "analysis":{
+ "analyzer":{
+ "analyzer1":{
+ "type":"stop",
+ "stopwords":["_english_"]
+ },
+ "analyzer2":{
+ "type":"stop",
+ "stopwords":"_english_"
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java b/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java
new file mode 100644
index 0000000..a578e39
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis.synonyms;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.lucene.all.AllEntries;
+import org.elasticsearch.common.lucene.all.AllTokenStream;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests synonym filters in Solr and WordNet formats, loaded both inline and from files.
+ */
+public class SynonymsAnalysisTest extends ElasticsearchTestCase {
+
+ protected final ESLogger logger = Loggers.getLogger(getClass());
+ private AnalysisService analysisService;
+
+ @Test
+ public void testSynonymsAnalysis() throws IOException {
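+ // exercises Solr- and WordNet-format synonyms, configured both inline and from a file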
+
+ Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/synonyms/synonyms.json").build();
+
+ Index index = new Index("test");
+
+ Injector parentInjector = new ModulesBuilder().add(
+ new SettingsModule(settings),
+ new EnvironmentModule(new Environment(settings)),
+ new IndicesAnalysisModule())
+ .createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ analysisService = injector.getInstance(AnalysisService.class);
+
+ match("synonymAnalyzer", "kimchy is the dude abides", "shay is the elasticsearch man!");
+ match("synonymAnalyzer_file", "kimchy is the dude abides", "shay is the elasticsearch man!");
+ match("synonymAnalyzerWordnet", "abstain", "abstain refrain desist");
+ match("synonymAnalyzerWordnet_file", "abstain", "abstain refrain desist");
+
+ }
+
+ private void match(String analyzerName, String source, String target) throws IOException {
+
+ Analyzer analyzer = analysisService.analyzer(analyzerName).analyzer();
+
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field", source, 1.0f);
+ allEntries.reset();
+
+ TokenStream stream = AllTokenStream.allTokenStream("_all", allEntries, analyzer);
+ stream.reset();
+ CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
+
+ StringBuilder sb = new StringBuilder();
+ while (stream.incrementToken()) {
+ sb.append(termAtt.toString()).append(" ");
+ }
+
+ MatcherAssert.assertThat(sb.toString().trim(), equalTo(target));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.json b/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.json
new file mode 100644
index 0000000..d23d6ef
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.json
@@ -0,0 +1,52 @@
+{
+ "index":{
+ "analysis":{
+ "analyzer":{
+ "synonymAnalyzer":{
+ "tokenizer":"standard",
+ "filter":[ "synonym" ]
+ },
+ "synonymAnalyzer_file":{
+ "tokenizer":"standard",
+ "filter":[ "synonym_file" ]
+ },
+ "synonymAnalyzerWordnet":{
+ "tokenizer":"standard",
+ "filter":[ "synonymWordnet" ]
+ },
+ "synonymAnalyzerWordnet_file":{
+ "tokenizer":"standard",
+ "filter":[ "synonymWordnet_file" ]
+ }
+ },
+ "filter":{
+ "synonym":{
+ "type":"synonym",
+ "synonyms":[
+ "kimchy => shay",
+ "dude => elasticsearch",
+ "abides => man!"
+ ]
+ },
+ "synonym_file":{
+ "type":"synonym",
+ "synonyms_path":"org/elasticsearch/index/analysis/synonyms/synonyms.txt"
+ },
+ "synonymWordnet":{
+ "type":"synonym",
+ "format":"wordnet",
+ "synonyms":[
+ "s(100000001,1,'abstain',v,1,0).",
+ "s(100000001,2,'refrain',v,1,0).",
+ "s(100000001,3,'desist',v,1,0)."
+ ]
+ },
+ "synonymWordnet_file":{
+ "type":"synonym",
+ "format":"wordnet",
+ "synonyms_path":"org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt"
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.txt b/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.txt
new file mode 100644
index 0000000..ef4b225
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.txt
@@ -0,0 +1,3 @@
+kimchy => shay
+dude => elasticsearch
+abides => man!
diff --git a/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt b/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt
new file mode 100644
index 0000000..f7b68e3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt
@@ -0,0 +1,3 @@
+s(100000001,1,'abstain',v,1,0).
+s(100000001,2,'refrain',v,1,0).
+s(100000001,3,'desist',v,1,0).
diff --git a/src/test/java/org/elasticsearch/index/analysis/test1.json b/src/test/java/org/elasticsearch/index/analysis/test1.json
new file mode 100644
index 0000000..3b503d1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/test1.json
@@ -0,0 +1,80 @@
+{
+ "index":{
+ "analysis":{
+ "tokenizer":{
+ "standard":{
+ "type":"standard"
+ }
+ },
+ "char_filter":{
+ "my_html":{
+ "type":"html_strip",
+ "escaped_tags":["xxx", "yyy"],
+ "read_ahead":1024
+ },
+ "my_pattern":{
+ "type":"pattern_replace",
+ "pattern":"sample(.*)",
+ "replacement":"replacedSample $1"
+ },
+ "my_mapping":{
+ "type":"mapping",
+ "mappings":["ph=>f", "qu=>q"]
+ }
+ },
+ "filter":{
+ "stop":{
+ "type":"stop",
+ "stopwords":["test-stop"]
+ },
+ "stop2":{
+ "type":"stop",
+ "stopwords":["stop2-1", "stop2-2"]
+ },
+ "my":{
+ "type":"org.elasticsearch.index.analysis.filter1.MyFilterTokenFilterFactory"
+ },
+ "dict_dec":{
+ "type":"dictionary_decompounder",
+ "word_list":["donau", "dampf", "schiff", "spargel", "creme", "suppe"]
+ }
+ },
+ "analyzer":{
+ "standard":{
+ "alias":"alias1,alias2",
+ "type":"standard",
+ "stopwords":["test1", "test2", "test3"]
+ },
+ "custom1":{
+ "alias":["alias4", "alias5"],
+ "tokenizer":"standard",
+ "filter":["stop", "stop2"]
+ },
+ "custom2":{
+ "tokenizer":"standard",
+ "char_filter":["html_strip", "my_html"]
+ },
+ "custom3":{
+ "tokenizer":"standard",
+ "char_filter":["my_pattern"]
+ },
+ "custom4":{
+ "tokenizer":"standard",
+ "filter":["my"]
+ },
+ "custom5":{
+ "tokenizer":"standard",
+ "char_filter":["my_mapping"]
+ },
+ "czechAnalyzerWithStemmer":{
+ "tokenizer":"standard",
+ "filter":["standard", "lowercase", "stop", "czech_stem"]
+ },
+ "decompoundingAnalyzer":{
+ "tokenizer":"standard",
+ "filter":["dict_dec"]
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/analysis/test1.yml b/src/test/java/org/elasticsearch/index/analysis/test1.yml
new file mode 100644
index 0000000..9c4aac6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/analysis/test1.yml
@@ -0,0 +1,61 @@
+index :
+ analysis :
+ tokenizer :
+ standard :
+ type : standard
+ char_filter :
+ my_html :
+ type : html_strip
+ escaped_tags : [xxx, yyy]
+ read_ahead : 1024
+ my_pattern :
+ type: pattern_replace
+ pattern: sample(.*)
+ replacement: replacedSample $1
+ my_mapping :
+ type : mapping
+ mappings : [ph=>f, qu=>q]
+ filter :
+ stop :
+ type : stop
+ stopwords : [test-stop]
+ stop2 :
+ type : stop
+ stopwords : [stop2-1, stop2-2]
+ my :
+ type : org.elasticsearch.index.analysis.filter1.MyFilterTokenFilterFactory
+ dict_dec :
+ type : dictionary_decompounder
+ word_list : [donau, dampf, schiff, spargel, creme, suppe]
+ analyzer :
+ standard :
+ alias: alias1,alias2
+ type : standard
+ stopwords : [test1, test2, test3]
+ custom1 :
+ alias : [alias4, alias5]
+ tokenizer : standard
+ filter : [stop, stop2]
+ custom2 :
+ tokenizer : standard
+ char_filter : [html_strip, my_html]
+ custom3 :
+ tokenizer : standard
+ char_filter : [my_pattern]
+ custom4 :
+ tokenizer : standard
+ filter : [my]
+ custom5 :
+ tokenizer : standard
+ char_filter : [my_mapping]
+ custom6 :
+ type : standard
+ custom7 :
+ type : standard
+ version: 3.6
+ czechAnalyzerWithStemmer :
+ tokenizer : standard
+ filter : [standard, lowercase, stop, czech_stem]
+ decompoundingAnalyzer :
+ tokenizer : standard
+ filter : [dict_dec] \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/cache/filter/FilterCacheTests.java b/src/test/java/org/elasticsearch/index/cache/filter/FilterCacheTests.java
new file mode 100644
index 0000000..3133ce2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/cache/filter/FilterCacheTests.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.filter;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.filter.none.NoneFilterCache;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests that {@link FilterCache} implementations behave correctly for cached filters across NRT deletions.
+ */
+public class FilterCacheTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNoCache() throws Exception {
+ verifyCache(new NoneFilterCache(new Index("test"), EMPTY_SETTINGS));
+ }
+
+ private void verifyCache(FilterCache filterCache) throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
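+ // open an NRT reader up front; it is refreshed below once documents have been added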
+ DirectoryReader reader = DirectoryReader.open(indexWriter, true);
+
+ for (int i = 0; i < 100; i++) {
+ Document document = new Document();
+ document.add(new TextField("id", Integer.toString(i), Field.Store.YES));
+ indexWriter.addDocument(document);
+ }
+
+ reader = refreshReader(reader);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ assertThat(Lucene.count(searcher, new ConstantScoreQuery(filterCache.cache(new TermFilter(new Term("id", "1"))))), equalTo(1l));
+ assertThat(Lucene.count(searcher, new XFilteredQuery(new MatchAllDocsQuery(), filterCache.cache(new TermFilter(new Term("id", "1"))))), equalTo(1l));
+
+ indexWriter.deleteDocuments(new Term("id", "1"));
+ reader = refreshReader(reader);
+ searcher = new IndexSearcher(reader);
+ TermFilter filter = new TermFilter(new Term("id", "1"));
+ Filter cachedFilter = filterCache.cache(filter);
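+ // NoneFilterCache.cache() returns the filter unchanged, so the NRT delete is honored and zero hits are expected; a caching implementation would still count the stale hit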
+ long constantScoreCount = filter == cachedFilter ? 0 : 1;
+ // a plain ConstantScoreQuery over a cache-key-based cached filter misses NRT deletions; the deletion-aware X* variants below handle this
+ assertThat(Lucene.count(searcher, new ConstantScoreQuery(cachedFilter)), equalTo(constantScoreCount));
+ assertThat(Lucene.count(searcher, new XConstantScoreQuery(cachedFilter)), equalTo(0l));
+ assertThat(Lucene.count(searcher, new XFilteredQuery(new MatchAllDocsQuery(), cachedFilter)), equalTo(0l));
+
+ indexWriter.close();
+ }
+
+ private DirectoryReader refreshReader(DirectoryReader reader) throws IOException {
+ // openIfChanged returns null when nothing changed; only swap (and close the old reader) when a new one exists
+ DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
+ if (newReader != null) {
+ reader.close();
+ return newReader;
+ }
+ return reader;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/cache/id/SimpleIdCacheTests.java b/src/test/java/org/elasticsearch/index/cache/id/SimpleIdCacheTests.java
new file mode 100644
index 0000000..68ab67e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/cache/id/SimpleIdCacheTests.java
@@ -0,0 +1,410 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.id;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexShardMissingException;
+import org.elasticsearch.index.aliases.IndexAliasesService;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.IndexCache;
+import org.elasticsearch.index.cache.id.simple.SimpleIdCache;
+import org.elasticsearch.index.engine.IndexEngine;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.gateway.IndexGateway;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests for {@link org.elasticsearch.index.cache.id.simple.SimpleIdCache}.
+ */
+public class SimpleIdCacheTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDeletedDocuments() throws Exception {
+ SimpleIdCache idCache = createSimpleIdCache(Tuple.tuple("child", "parent"));
+ IndexWriter writer = createIndexWriter();
+ // Begins with parent, ends with child docs
+ final Document parent = doc("parent", "1");
+ writer.addDocument(parent);
+ writer.addDocument(childDoc("child", "1", "parent", "1"));
+ writer.addDocument(childDoc("child", "2", "parent", "1"));
+ writer.addDocument(childDoc("child", "3", "parent", "1"));
+ writer.commit();
+
+ final String parentUid = parent.get("_uid");
+ assertNotNull(parentUid);
+ writer.deleteDocuments(new Term("_uid", parentUid));
+
+ writer.close();
+ DirectoryReader topLevelReader = DirectoryReader.open(writer.getDirectory());
+ List<AtomicReaderContext> leaves = topLevelReader.getContext().leaves();
+ idCache.refresh(leaves);
+
+ assertThat(leaves.size(), equalTo(1));
+ IdReaderCache readerCache = idCache.reader(leaves.get(0).reader());
+ IdReaderTypeCache typeCache = readerCache.type("parent");
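+ // the id cache is built from the segment's terms, so the deleted parent doc is still expected to resolve its id here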
+ assertThat(typeCache.idByDoc(0).toUtf8(), equalTo("1"));
+ }
+
+ @Test
+ public void testRefresh() throws Exception {
+ SimpleIdCache idCache = createSimpleIdCache(Tuple.tuple("child", "parent"));
+ IndexWriter writer = createIndexWriter();
+ // Begins with parent, ends with child docs
+ writer.addDocument(doc("parent", "1"));
+ writer.addDocument(childDoc("child", "1", "parent", "1"));
+ writer.addDocument(childDoc("child", "2", "parent", "1"));
+ writer.addDocument(childDoc("child", "3", "parent", "1"));
+ writer.addDocument(childDoc("child", "4", "parent", "1"));
+ writer.commit();
+
+ // Begins with child, ends with parent docs
+ writer.addDocument(childDoc("child", "5", "parent", "2"));
+ writer.addDocument(doc("parent", "2"));
+ writer.addDocument(doc("parent", "3"));
+ writer.addDocument(doc("parent", "4"));
+ writer.addDocument(doc("parent", "5"));
+ writer.commit();
+
+ // Begins with parent, child docs in the middle and ends with parent doc
+ writer.addDocument(doc("parent", "6"));
+ writer.addDocument(childDoc("child", "6", "parent", "6"));
+ writer.addDocument(childDoc("child", "7", "parent", "6"));
+ writer.addDocument(childDoc("child", "8", "parent", "5"));
+ writer.addDocument(childDoc("child", "9", "parent", "4"));
+ writer.addDocument(doc("parent", "7"));
+ writer.commit();
+
+ // Garbage segment
+ writer.addDocument(doc("zzz", "1"));
+ writer.addDocument(doc("xxx", "2"));
+ writer.addDocument(doc("aaa", "3"));
+ writer.addDocument(doc("ccc", "4"));
+ writer.addDocument(doc("parent", "8"));
+ writer.commit();
+
+ writer.close();
+ DirectoryReader topLevelReader = DirectoryReader.open(writer.getDirectory());
+ List<AtomicReaderContext> leaves = topLevelReader.getContext().leaves();
+ idCache.refresh(leaves);
+
+ // Verify simple id cache for segment 1
+ IdReaderCache readerCache = idCache.reader(leaves.get(0).reader());
+ assertThat(readerCache.type("child"), nullValue());
+ IdReaderTypeCache typeCache = readerCache.type("parent");
+ assertThat(typeCache.idByDoc(0).toUtf8(), equalTo("1"));
+ assertThat(typeCache.idByDoc(1), nullValue());
+ assertThat(typeCache.idByDoc(2), nullValue());
+ assertThat(typeCache.idByDoc(3), nullValue());
+ assertThat(typeCache.idByDoc(4), nullValue());
+
+ assertThat(typeCache.parentIdByDoc(0), nullValue());
+ assertThat(typeCache.parentIdByDoc(1).toUtf8(), equalTo("1"));
+ assertThat(typeCache.parentIdByDoc(2).toUtf8(), equalTo("1"));
+ assertThat(typeCache.parentIdByDoc(3).toUtf8(), equalTo("1"));
+ assertThat(typeCache.parentIdByDoc(4).toUtf8(), equalTo("1"));
+
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("1"))), equalTo(0));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("2"))), equalTo(-1));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("3"))), equalTo(-1));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("4"))), equalTo(-1));
+
+ // Verify simple id cache for segment 2
+ readerCache = idCache.reader(leaves.get(1).reader());
+ assertThat(readerCache.type("child"), nullValue());
+ typeCache = readerCache.type("parent");
+ assertThat(typeCache.idByDoc(0), nullValue());
+ assertThat(typeCache.idByDoc(1).toUtf8(), equalTo("2"));
+ assertThat(typeCache.idByDoc(2).toUtf8(), equalTo("3"));
+ assertThat(typeCache.idByDoc(3).toUtf8(), equalTo("4"));
+ assertThat(typeCache.idByDoc(4).toUtf8(), equalTo("5"));
+
+ assertThat(typeCache.parentIdByDoc(0).toUtf8(), equalTo("2"));
+ assertThat(typeCache.parentIdByDoc(1), nullValue());
+ assertThat(typeCache.parentIdByDoc(2), nullValue());
+ assertThat(typeCache.parentIdByDoc(3), nullValue());
+ assertThat(typeCache.parentIdByDoc(4), nullValue());
+
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("2"))), equalTo(1));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("3"))), equalTo(2));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("4"))), equalTo(3));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("5"))), equalTo(4));
+
+ // Verify simple id cache for segment 3
+ readerCache = idCache.reader(leaves.get(2).reader());
+ assertThat(readerCache.type("child"), nullValue());
+ typeCache = readerCache.type("parent");
+ assertThat(typeCache.idByDoc(0).toUtf8(), equalTo("6"));
+ assertThat(typeCache.idByDoc(1), nullValue());
+ assertThat(typeCache.idByDoc(2), nullValue());
+ assertThat(typeCache.idByDoc(3), nullValue());
+ assertThat(typeCache.idByDoc(4), nullValue());
+ assertThat(typeCache.idByDoc(5).toUtf8(), equalTo("7"));
+
+ assertThat(typeCache.parentIdByDoc(0), nullValue());
+ assertThat(typeCache.parentIdByDoc(1).toUtf8(), equalTo("6"));
+ assertThat(typeCache.parentIdByDoc(2).toUtf8(), equalTo("6"));
+ assertThat(typeCache.parentIdByDoc(3).toUtf8(), equalTo("5"));
+ assertThat(typeCache.parentIdByDoc(4).toUtf8(), equalTo("4"));
+ assertThat(typeCache.parentIdByDoc(5), nullValue());
+
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("6"))), equalTo(0));
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("7"))), equalTo(5));
+
+ // Verify simple id cache for segment 4
+ readerCache = idCache.reader(leaves.get(3).reader());
+ assertThat(readerCache.type("child"), nullValue());
+ typeCache = readerCache.type("parent");
+ assertThat(typeCache.idByDoc(0), nullValue());
+ assertThat(typeCache.idByDoc(1), nullValue());
+ assertThat(typeCache.idByDoc(2), nullValue());
+ assertThat(typeCache.idByDoc(3), nullValue());
+ assertThat(typeCache.idByDoc(4).toUtf8(), equalTo("8"));
+
+ assertThat(typeCache.parentIdByDoc(0), nullValue());
+ assertThat(typeCache.parentIdByDoc(1), nullValue());
+ assertThat(typeCache.parentIdByDoc(2), nullValue());
+ assertThat(typeCache.parentIdByDoc(3), nullValue());
+ assertThat(typeCache.parentIdByDoc(4), nullValue());
+
+ assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("8"))), equalTo(4));
+ }
+
+ @Test(expected = AssertionError.class)
+ public void testRefresh_tripAssert() throws Exception {
+ assumeTrue(ASSERTIONS_ENABLED);
+ SimpleIdCache idCache = createSimpleIdCache(Tuple.tuple("child", "parent"));
+ IndexWriter writer = createIndexWriter();
+ // Begins with parent, ends with child docs
+ writer.addDocument(doc("parent", "1"));
+ writer.addDocument(childDoc("child", "1", "parent", "1"));
+ writer.addDocument(childDoc("child", "2", "parent", "1"));
+ writer.addDocument(childDoc("child", "3", "parent", "1"));
+ writer.addDocument(childDoc("child", "4", "parent", "1"));
+ // A doc like this should never end up in the index; it exists only to trip the assertion during refresh
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, "parent", Field.Store.NO));
+ writer.addDocument(document);
+ writer.commit();
+
+ writer.close();
+ DirectoryReader topLevelReader = DirectoryReader.open(writer.getDirectory());
+ List<AtomicReaderContext> leaves = topLevelReader.getContext().leaves();
+ idCache.refresh(leaves);
+ }
+
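+ // builds a bare document whose _uid field encodes "type#id", mirroring the UidFieldMapper format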
+ private Document doc(String type, String id) {
+ Document parent = new Document();
+ parent.add(new StringField(UidFieldMapper.NAME, String.format(Locale.ROOT, "%s#%s", type, id), Field.Store.NO));
+ return parent;
+ }
+
+ private Document childDoc(String type, String id, String parentType, String parentId) {
+ Document parent = new Document();
+ parent.add(new StringField(UidFieldMapper.NAME, String.format(Locale.ROOT, "%s#%s", type, id), Field.Store.NO));
+ parent.add(new StringField(ParentFieldMapper.NAME, String.format(Locale.ROOT, "%s#%s", parentType, parentId), Field.Store.NO));
+ return parent;
+ }
+
+ private SimpleIdCache createSimpleIdCache(Tuple<String, String>... documentTypes) throws IOException {
+ Settings settings = ImmutableSettings.EMPTY;
+ Index index = new Index("test");
+ SimpleIdCache idCache = new SimpleIdCache(index, settings);
+ MapperService mapperService = MapperTestUtils.newMapperService();
+ idCache.setIndexService(new StubIndexService(mapperService));
+
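+ // register each child type with a _parent mapping pointing at its parent type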
+ for (Tuple<String, String> documentType : documentTypes) {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(documentType.v1())
+ .startObject("_parent").field("type", documentType.v2()).endObject()
+ .endObject().endObject().string();
+ mapperService.merge(documentType.v1(), new CompressedString(defaultMapping), true);
+ }
+
+ return idCache;
+ }
+
+ private IndexWriter createIndexWriter() throws IOException {
+ return new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, new StandardAnalyzer(Lucene.VERSION)));
+ }
+
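+ // Minimal IndexService stub: only mapperService() is backed; every other lookup returns null or a no-op default.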
+ public static class StubIndexService implements IndexService {
+
+ private final MapperService mapperService;
+
+ public StubIndexService(MapperService mapperService) {
+ this.mapperService = mapperService;
+ }
+
+ @Override
+ public Injector injector() {
+ return null;
+ }
+
+ @Override
+ public IndexGateway gateway() {
+ return null;
+ }
+
+ @Override
+ public IndexCache cache() {
+ return null;
+ }
+
+ @Override
+ public IndexFieldDataService fieldData() {
+ return null;
+ }
+
+ @Override
+ public IndexSettingsService settingsService() {
+ return null;
+ }
+
+ @Override
+ public AnalysisService analysisService() {
+ return null;
+ }
+
+ @Override
+ public MapperService mapperService() {
+ return mapperService;
+ }
+
+ @Override
+ public IndexQueryParserService queryParserService() {
+ return null;
+ }
+
+ @Override
+ public SimilarityService similarityService() {
+ return null;
+ }
+
+ @Override
+ public IndexAliasesService aliasesService() {
+ return null;
+ }
+
+ @Override
+ public IndexEngine engine() {
+ return null;
+ }
+
+ @Override
+ public IndexStore store() {
+ return null;
+ }
+
+ @Override
+ public IndexShard createShard(int shardId) throws ElasticsearchException {
+ return null;
+ }
+
+ @Override
+ public void removeShard(int shardId, String reason) throws ElasticsearchException {
+ }
+
+ @Override
+ public int numberOfShards() {
+ return 0;
+ }
+
+ @Override
+ public ImmutableSet<Integer> shardIds() {
+ return null;
+ }
+
+ @Override
+ public boolean hasShard(int shardId) {
+ return false;
+ }
+
+ @Override
+ public IndexShard shard(int shardId) {
+ return null;
+ }
+
+ @Override
+ public IndexShard shardSafe(int shardId) throws IndexShardMissingException {
+ return null;
+ }
+
+ @Override
+ public Injector shardInjector(int shardId) {
+ return null;
+ }
+
+ @Override
+ public Injector shardInjectorSafe(int shardId) throws IndexShardMissingException {
+ return null;
+ }
+
+ @Override
+ public String indexUUID() {
+ return IndexMetaData.INDEX_UUID_NA_VALUE;
+ }
+
+ @Override
+ public Index index() {
+ return null;
+ }
+
+ @Override
+ public Iterator<IndexShard> iterator() {
+ return null;
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/src/test/java/org/elasticsearch/index/codec/CodecTests.java
new file mode 100644
index 0000000..e1ba608
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/codec/CodecTests.java
@@ -0,0 +1,430 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec;
+
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.bloom.BloomFilteringPostingsFormat;
+import org.apache.lucene.codecs.diskdv.DiskDocValuesFormat;
+import org.apache.lucene.codecs.lucene40.Lucene40Codec;
+import org.apache.lucene.codecs.lucene41.Lucene41Codec;
+import org.apache.lucene.codecs.lucene42.Lucene42Codec;
+import org.apache.lucene.codecs.lucene45.Lucene45Codec;
+import org.apache.lucene.codecs.lucene45.Lucene45DocValuesFormat;
+import org.apache.lucene.codecs.lucene46.Lucene46Codec;
+import org.apache.lucene.codecs.memory.DirectPostingsFormat;
+import org.apache.lucene.codecs.memory.MemoryDocValuesFormat;
+import org.apache.lucene.codecs.memory.MemoryPostingsFormat;
+import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
+import org.apache.lucene.codecs.pulsing.Pulsing41PostingsFormat;
+import org.apache.lucene.codecs.simpletext.SimpleTextCodec;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.codec.docvaluesformat.*;
+import org.elasticsearch.index.codec.postingsformat.*;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperServiceModule;
+import org.elasticsearch.index.mapper.internal.IdFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class CodecTests extends ElasticsearchLuceneTestCase {
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ forceDefaultCodec(); // we test against default codec so never get a random one here!
+ }
+
+ @Test
+ public void testResolveDefaultCodecs() throws Exception {
+ CodecService codecService = createCodecService();
+ assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class));
+ assertThat(codecService.codec("default"), instanceOf(Lucene46Codec.class));
+ assertThat(codecService.codec("Lucene46"), instanceOf(Lucene46Codec.class));
+ assertThat(codecService.codec("Lucene45"), instanceOf(Lucene45Codec.class));
+ assertThat(codecService.codec("Lucene40"), instanceOf(Lucene40Codec.class));
+ assertThat(codecService.codec("Lucene41"), instanceOf(Lucene41Codec.class));
+ assertThat(codecService.codec("Lucene42"), instanceOf(Lucene42Codec.class));
+ assertThat(codecService.codec("SimpleText"), instanceOf(SimpleTextCodec.class));
+ }
+
+ @Test
+ public void testResolveDefaultPostingFormats() throws Exception {
+ PostingsFormatService postingsFormatService = createCodecService().postingsFormatService();
+ assertThat(postingsFormatService.get("default"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("default").get(), instanceOf(Elasticsearch090PostingsFormat.class));
+
+ // expected to break when a Lucene upgrade changes the default postings format, forcing a review of the wrapped default
+ assertThat(((Elasticsearch090PostingsFormat)postingsFormatService.get("default").get()).getDefaultWrapped(), instanceOf(((PerFieldPostingsFormat) Codec.getDefault().postingsFormat()).getPostingsFormatForField("").getClass()));
+ assertThat(postingsFormatService.get("Lucene41"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ // likewise expected to break on a Lucene upgrade that changes the default postings format
+ assertThat(postingsFormatService.get("Lucene41").get(), instanceOf(((PerFieldPostingsFormat) Codec.getDefault().postingsFormat()).getPostingsFormatForField(null).getClass()));
+
+ assertThat(postingsFormatService.get("bloom_default"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ if (PostingFormats.luceneBloomFilter) {
+ assertThat(postingsFormatService.get("bloom_default").get(), instanceOf(BloomFilteringPostingsFormat.class));
+ } else {
+ assertThat(postingsFormatService.get("bloom_default").get(), instanceOf(BloomFilterPostingsFormat.class));
+ }
+ assertThat(postingsFormatService.get("BloomFilter"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("BloomFilter").get(), instanceOf(BloomFilteringPostingsFormat.class));
+
+ assertThat(postingsFormatService.get("XBloomFilter"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("XBloomFilter").get(), instanceOf(BloomFilterPostingsFormat.class));
+
+ if (PostingFormats.luceneBloomFilter) {
+ assertThat(postingsFormatService.get("bloom_pulsing").get(), instanceOf(BloomFilteringPostingsFormat.class));
+ } else {
+ assertThat(postingsFormatService.get("bloom_pulsing").get(), instanceOf(BloomFilterPostingsFormat.class));
+ }
+
+ assertThat(postingsFormatService.get("pulsing"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("pulsing").get(), instanceOf(Pulsing41PostingsFormat.class));
+ assertThat(postingsFormatService.get("Pulsing41"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("Pulsing41").get(), instanceOf(Pulsing41PostingsFormat.class));
+
+ assertThat(postingsFormatService.get("memory"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("memory").get(), instanceOf(MemoryPostingsFormat.class));
+ assertThat(postingsFormatService.get("Memory"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("Memory").get(), instanceOf(MemoryPostingsFormat.class));
+
+ assertThat(postingsFormatService.get("direct"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("direct").get(), instanceOf(DirectPostingsFormat.class));
+ assertThat(postingsFormatService.get("Direct"), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(postingsFormatService.get("Direct").get(), instanceOf(DirectPostingsFormat.class));
+ }
+
+ @Test
+ public void testResolveDefaultDocValuesFormats() throws Exception {
+ DocValuesFormatService docValuesFormatService = createCodecService().docValuesFormatService();
+
+ for (String dvf : Arrays.asList("memory", "disk", "Disk", "default")) {
+ assertThat(docValuesFormatService.get(dvf), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ }
+ assertThat(docValuesFormatService.get("memory").get(), instanceOf(MemoryDocValuesFormat.class));
+ assertThat(docValuesFormatService.get("disk").get(), instanceOf(DiskDocValuesFormat.class));
+ assertThat(docValuesFormatService.get("Disk").get(), instanceOf(DiskDocValuesFormat.class));
+ assertThat(docValuesFormatService.get("default").get(), instanceOf(Lucene45DocValuesFormat.class));
+ }
+
+ @Test
+ public void testResolvePostingFormatsFromMapping_default() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("postings_format", "default").endObject()
+ .startObject("field2").field("type", "string").field("postings_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.postings_format.my_format1.type", "default")
+ .put("index.codec.postings_format.my_format1.min_block_size", 16)
+ .put("index.codec.postings_format.my_format1.max_block_size", 64)
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider().get(), instanceOf(Elasticsearch090PostingsFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider(), instanceOf(DefaultPostingsFormatProvider.class));
+ DefaultPostingsFormatProvider provider = (DefaultPostingsFormatProvider) documentMapper.mappers().name("field2").mapper().postingsFormatProvider();
+ assertThat(provider.minBlockSize(), equalTo(16));
+ assertThat(provider.maxBlockSize(), equalTo(64));
+ }
+
+ @Test
+ public void testResolvePostingFormatsFromMapping_memory() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("postings_format", "memory").endObject()
+ .startObject("field2").field("type", "string").field("postings_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.postings_format.my_format1.type", "memory")
+ .put("index.codec.postings_format.my_format1.pack_fst", true)
+ .put("index.codec.postings_format.my_format1.acceptable_overhead_ratio", 0.3f)
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider().get(), instanceOf(MemoryPostingsFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider(), instanceOf(MemoryPostingsFormatProvider.class));
+ MemoryPostingsFormatProvider provider = (MemoryPostingsFormatProvider) documentMapper.mappers().name("field2").mapper().postingsFormatProvider();
+ assertThat(provider.packFst(), equalTo(true));
+ assertThat(provider.acceptableOverheadRatio(), equalTo(0.3f));
+ }
+
+ @Test
+ public void testResolvePostingFormatsFromMapping_direct() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("postings_format", "direct").endObject()
+ .startObject("field2").field("type", "string").field("postings_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.postings_format.my_format1.type", "direct")
+ .put("index.codec.postings_format.my_format1.min_skip_count", 16)
+ .put("index.codec.postings_format.my_format1.low_freq_cutoff", 64)
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider().get(), instanceOf(DirectPostingsFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider(), instanceOf(DirectPostingsFormatProvider.class));
+ DirectPostingsFormatProvider provider = (DirectPostingsFormatProvider) documentMapper.mappers().name("field2").mapper().postingsFormatProvider();
+ assertThat(provider.minSkipCount(), equalTo(16));
+ assertThat(provider.lowFreqCutoff(), equalTo(64));
+ }
+
+ @Test
+ public void testResolvePostingFormatsFromMapping_pulsing() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("postings_format", "pulsing").endObject()
+ .startObject("field2").field("type", "string").field("postings_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.postings_format.my_format1.type", "pulsing")
+ .put("index.codec.postings_format.my_format1.freq_cut_off", 2)
+ .put("index.codec.postings_format.my_format1.min_block_size", 32)
+ .put("index.codec.postings_format.my_format1.max_block_size", 64)
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider().get(), instanceOf(Pulsing41PostingsFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider(), instanceOf(PulsingPostingsFormatProvider.class));
+ PulsingPostingsFormatProvider provider = (PulsingPostingsFormatProvider) documentMapper.mappers().name("field2").mapper().postingsFormatProvider();
+ assertThat(provider.freqCutOff(), equalTo(2));
+ assertThat(provider.minBlockSize(), equalTo(32));
+ assertThat(provider.maxBlockSize(), equalTo(64));
+ }
+
+ @Test
+ public void testResolvePostingFormatsFromMappingLuceneBloom() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("postings_format", "bloom_default").endObject()
+ .startObject("field2").field("type", "string").field("postings_format", "bloom_pulsing").endObject()
+ .startObject("field3").field("type", "string").field("postings_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.postings_format.my_format1.type", "bloom_filter_lucene")
+ .put("index.codec.postings_format.my_format1.desired_max_saturation", 0.2f)
+ .put("index.codec.postings_format.my_format1.saturation_limit", 0.8f)
+ .put("index.codec.postings_format.my_format1.delegate", "delegate1")
+ .put("index.codec.postings_format.delegate1.type", "direct")
+ .put("index.codec.postings_format.delegate1.min_skip_count", 16)
+ .put("index.codec.postings_format.delegate1.low_freq_cutoff", 64)
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ if (PostingFormats.luceneBloomFilter) {
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider().get(), instanceOf(BloomFilteringPostingsFormat.class));
+ } else {
+ assertThat(documentMapper.mappers().name("field1").mapper().postingsFormatProvider().get(), instanceOf(BloomFilterPostingsFormat.class));
+ }
+
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ if (PostingFormats.luceneBloomFilter) {
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider().get(), instanceOf(BloomFilteringPostingsFormat.class));
+ } else {
+ assertThat(documentMapper.mappers().name("field2").mapper().postingsFormatProvider().get(), instanceOf(BloomFilterPostingsFormat.class));
+ }
+
+ assertThat(documentMapper.mappers().name("field3").mapper().postingsFormatProvider(), instanceOf(BloomFilterLucenePostingsFormatProvider.class));
+ BloomFilterLucenePostingsFormatProvider provider = (BloomFilterLucenePostingsFormatProvider) documentMapper.mappers().name("field3").mapper().postingsFormatProvider();
+ assertThat(provider.desiredMaxSaturation(), equalTo(0.2f));
+ assertThat(provider.saturationLimit(), equalTo(0.8f));
+ assertThat(provider.delegate(), instanceOf(DirectPostingsFormatProvider.class));
+ DirectPostingsFormatProvider delegate = (DirectPostingsFormatProvider) provider.delegate();
+ assertThat(delegate.minSkipCount(), equalTo(16));
+ assertThat(delegate.lowFreqCutoff(), equalTo(64));
+ }
+
+ @Test
+ public void testChangeUidPostingsFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_uid").field("postings_format", "memory").endObject()
+ .endObject().endObject().string();
+
+ CodecService codecService = createCodecService();
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.rootMapper(UidFieldMapper.class).postingsFormatProvider(), instanceOf(PreBuiltPostingsFormatProvider.class));
+ assertThat(documentMapper.rootMapper(UidFieldMapper.class).postingsFormatProvider().get(), instanceOf(MemoryPostingsFormat.class));
+ }
+
+ @Test
+ public void testChangeUidDocValuesFormat() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_uid").startObject("fielddata").field("format", "doc_values").endObject().field("doc_values_format", "disk").endObject()
+ .endObject().endObject().string();
+
+ CodecService codecService = createCodecService();
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.rootMapper(UidFieldMapper.class).hasDocValues(), equalTo(true));
+ assertThat(documentMapper.rootMapper(UidFieldMapper.class).docValuesFormatProvider(), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ assertThat(documentMapper.rootMapper(UidFieldMapper.class).docValuesFormatProvider().get(), instanceOf(DiskDocValuesFormat.class));
+ }
+
+ @Test
+ public void testChangeIdDocValuesFormat() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_id").startObject("fielddata").field("format", "doc_values").endObject().field("doc_values_format", "disk").endObject()
+ .endObject().endObject().string();
+
+ CodecService codecService = createCodecService();
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.rootMapper(IdFieldMapper.class).hasDocValues(), equalTo(true));
+ assertThat(documentMapper.rootMapper(IdFieldMapper.class).docValuesFormatProvider(), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ assertThat(documentMapper.rootMapper(IdFieldMapper.class).docValuesFormatProvider().get(), instanceOf(DiskDocValuesFormat.class));
+ }
+
+ @Test
+ public void testResolveDocValuesFormatsFromMapping_default() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "integer").field("doc_values_format", "default").endObject()
+ .startObject("field2").field("type", "double").field("doc_values_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.doc_values_format.my_format1.type", "default")
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().docValuesFormatProvider(), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().docValuesFormatProvider().get(), instanceOf(Lucene45DocValuesFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().docValuesFormatProvider(), instanceOf(DefaultDocValuesFormatProvider.class));
+ }
+
+ @Test
+ public void testResolveDocValuesFormatsFromMapping_memory() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "integer").field("doc_values_format", "memory").endObject()
+ .startObject("field2").field("type", "double").field("doc_values_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.doc_values_format.my_format1.type", "memory")
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().docValuesFormatProvider(), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().docValuesFormatProvider().get(), instanceOf(MemoryDocValuesFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().docValuesFormatProvider(), instanceOf(MemoryDocValuesFormatProvider.class));
+ }
+
+ @Test
+ public void testResolveDocValuesFormatsFromMapping_disk() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "integer").field("doc_values_format", "disk").endObject()
+ .startObject("field2").field("type", "double").field("doc_values_format", "my_format1").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.codec.doc_values_format.my_format1.type", "disk")
+ .build();
+ CodecService codecService = createCodecService(indexSettings);
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().docValuesFormatProvider(), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ assertThat(documentMapper.mappers().name("field1").mapper().docValuesFormatProvider().get(), instanceOf(DiskDocValuesFormat.class));
+
+ assertThat(documentMapper.mappers().name("field2").mapper().docValuesFormatProvider(), instanceOf(DiskDocValuesFormatProvider.class));
+ }
+
+ @Test
+ public void testChangeVersionFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_version").field("doc_values_format", "disk").endObject()
+ .endObject().endObject().string();
+
+ CodecService codecService = createCodecService();
+ DocumentMapper documentMapper = codecService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.rootMapper(VersionFieldMapper.class).docValuesFormatProvider(), instanceOf(PreBuiltDocValuesFormatProvider.class));
+ assertThat(documentMapper.rootMapper(VersionFieldMapper.class).docValuesFormatProvider().get(), instanceOf(DiskDocValuesFormat.class));
+ }
+
+ private static CodecService createCodecService() {
+ return createCodecService(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
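+ // wires up just enough Guice modules to resolve a CodecService outside a running node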
+ private static CodecService createCodecService(Settings settings) {
+ Index index = new Index("test");
+ Injector injector = new ModulesBuilder()
+ .add(new SettingsModule(settings))
+ .add(new IndexNameModule(index))
+ .add(new IndexSettingsModule(index, settings))
+ .add(new SimilarityModule(settings))
+ .add(new CodecModule(settings))
+ .add(new MapperServiceModule())
+ .add(new AnalysisModule(settings))
+ .add(new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ })
+ .createInjector();
+ return injector.getInstance(CodecService.class);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/codec/postingformat/DefaultPostingsFormatTests.java b/src/test/java/org/elasticsearch/index/codec/postingformat/DefaultPostingsFormatTests.java
new file mode 100644
index 0000000..a8e3a14
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/codec/postingformat/DefaultPostingsFormatTests.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingformat;
+
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.lucene46.Lucene46Codec;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat;
+import org.elasticsearch.index.codec.postingsformat.Elasticsearch090PostingsFormat;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.merge.Merges;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Simple smoke test for {@link org.elasticsearch.index.codec.postingsformat.Elasticsearch090PostingsFormat}
+ */
+public class DefaultPostingsFormatTests extends ElasticsearchTestCase {
+
+ private final class TestCodec extends Lucene46Codec {
+
+ @Override
+ public PostingsFormat getPostingsFormatForField(String field) {
+ return new Elasticsearch090PostingsFormat();
+ }
+ }
+
+ @Test
+ public void testUseDefault() throws IOException {
+
+ Codec codec = new TestCodec();
+ Directory d = new RAMDirectory();
+ IndexWriterConfig config = new IndexWriterConfig(Lucene.VERSION, new WhitespaceAnalyzer(Lucene.VERSION));
+ config.setCodec(codec);
+ IndexWriter writer = new IndexWriter(d, config);
+ writer.addDocument(Arrays.asList(new TextField("foo", "bar", Store.YES), new TextField(UidFieldMapper.NAME, "1234", Store.YES)));
+ writer.commit();
+ DirectoryReader reader = DirectoryReader.open(writer, false);
+ List<AtomicReaderContext> leaves = reader.leaves();
+ assertThat(leaves.size(), equalTo(1));
+ AtomicReader ar = leaves.get(0).reader();
+ Terms terms = ar.terms("foo");
+ Terms uidTerms = ar.terms(UidFieldMapper.NAME);
+
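+ // only the _uid field should be wrapped in the bloom filter postings; regular fields keep the plain default format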
+ assertThat(terms.size(), equalTo(1l));
+ assertThat(terms, not(instanceOf(BloomFilterPostingsFormat.BloomFilteredTerms.class)));
+ assertThat(uidTerms, instanceOf(BloomFilterPostingsFormat.BloomFilteredTerms.class));
+
+ reader.close();
+ writer.close();
+ d.close();
+ }
+
+ @Test
+ public void testNoUIDField() throws IOException {
+
+ Codec codec = new TestCodec();
+ Directory d = new RAMDirectory();
+ IndexWriterConfig config = new IndexWriterConfig(Lucene.VERSION, new WhitespaceAnalyzer(Lucene.VERSION));
+ config.setCodec(codec);
+ IndexWriter writer = new IndexWriter(d, config);
+ for (int i = 0; i < 100; i++) {
+ writer.addDocument(Arrays.asList(new TextField("foo", "foo bar foo bar", Store.YES), new TextField("some_other_field", "1234", Store.YES)));
+ }
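+ // force-merge down to a single segment so the reader exposes exactly one leaf below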
+ Merges.forceMerge(writer, 1);
+ writer.commit();
+
+ DirectoryReader reader = DirectoryReader.open(writer, false);
+ List<AtomicReaderContext> leaves = reader.leaves();
+ assertThat(leaves.size(), equalTo(1));
+ AtomicReader ar = leaves.get(0).reader();
+ Terms terms = ar.terms("foo");
+ Terms someOtherField = ar.terms("some_other_field");
+
+ assertThat(terms.size(), equalTo(2l));
+ assertThat(terms, not(instanceOf(BloomFilterPostingsFormat.BloomFilteredTerms.class)));
+ assertThat(someOtherField, not(instanceOf(BloomFilterPostingsFormat.BloomFilteredTerms.class)));
+ TermsEnum iterator = terms.iterator(null);
+ Set<String> expected = new HashSet<String>();
+ expected.add("foo");
+ expected.add("bar");
+ while(iterator.next() != null) {
+ expected.remove(iterator.term().utf8ToString());
+ }
+ assertThat(expected.size(), equalTo(0));
+ reader.close();
+ writer.close();
+ d.close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java b/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java
new file mode 100644
index 0000000..eed806c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.deletionpolicy;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.lucene.index.DirectoryReader.listCommits;
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * A set of tests for {@link org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy}.
+ */
+public class SnapshotDeletionPolicyTests extends ElasticsearchTestCase {
+
+ protected final ShardId shardId = new ShardId(new Index("index"), 1);
+
+ private RAMDirectory dir;
+ private SnapshotDeletionPolicy deletionPolicy;
+ private IndexWriter indexWriter;
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ dir = new RAMDirectory();
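+ // KeepOnlyLastDeletionPolicy retains only the newest commit; wrapping it in SnapshotDeletionPolicy additionally pins snapshotted commits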
+ deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS));
+ indexWriter = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, Lucene.STANDARD_ANALYZER)
+ .setIndexDeletionPolicy(deletionPolicy)
+ .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ indexWriter.close();
+ dir.close();
+ }
+
+ private Document testDocument() {
+ Document document = new Document();
+ document.add(new TextField("test", "1", Field.Store.YES));
+ return document;
+ }
+
+ @Test
+ public void testSimpleSnapshot() throws Exception {
+ // add a document and commit, resulting in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // add another document and commit, resulting again in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // snapshot the last commit, and then add a document and commit, now we should have two commit points
+ SnapshotIndexCommit snapshot = deletionPolicy.snapshot();
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(2));
+
+ // release the commit, add a document and commit, now we should be back to one commit point
+ assertThat(snapshot.release(), equalTo(true));
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+ }
+
+ @Test
+ public void testMultiSnapshot() throws Exception {
+ // add a document and commit, resulting in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // take two snapshots
+ SnapshotIndexCommit snapshot1 = deletionPolicy.snapshot();
+ SnapshotIndexCommit snapshot2 = deletionPolicy.snapshot();
+
+ // we should have two commit points
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(2));
+
+ // release one snapshot, we should still have two commit points
+ assertThat(snapshot1.release(), equalTo(true));
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(2));
+
+ // release the second snapshot, we should be back to one commit
+ assertThat(snapshot2.release(), equalTo(true));
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+ }
+
+ @Test
+ public void testMultiReleaseException() throws Exception {
+ // add a document and commit, resulting in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // snapshot the last commit and release it twice; the second release must be a no-op that returns false
+ SnapshotIndexCommit snapshot = deletionPolicy.snapshot();
+ assertThat(snapshot.release(), equalTo(true));
+ assertThat(snapshot.release(), equalTo(false));
+ }
+
+ @Test
+ public void testSimpleSnapshots() throws Exception {
+ // add a document and commit, resulting in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // add another document and commit, resulting again in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // snapshot the last commit, and then add a document and commit, now we should have two commit points
+ SnapshotIndexCommit snapshot = deletionPolicy.snapshot();
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(2));
+
+ // now, take a snapshot of all the commits
+ SnapshotIndexCommits snapshots = deletionPolicy.snapshots();
+ assertThat(snapshots.size(), equalTo(2));
+
+ // release the snapshot, add a document and commit
+ // we should have 3 commit points, since the snapshots are still holding onto the first two
+ // even though the deletion policy only keeps the last commit
+ assertThat(snapshot.release(), equalTo(true));
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(3));
+
+ // now release the snapshots, we should be back to a single commit point
+ assertThat(snapshots.release(), equalTo(true));
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java b/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java
new file mode 100644
index 0000000..c3d00f6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.deletionpolicy;
+
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class SnapshotIndexCommitExistsMatcher extends TypeSafeMatcher<SnapshotIndexCommit> {
+
+ @Override
+ public boolean matchesSafely(SnapshotIndexCommit snapshotIndexCommit) {
+ for (String fileName : snapshotIndexCommit.getFiles()) {
+ try {
+ if (!snapshotIndexCommit.getDirectory().fileExists(fileName)) {
+ return false;
+ }
+ } catch (IOException e) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("an index commit whose files all exist in the directory");
+ }
+
+ public static Matcher<SnapshotIndexCommit> snapshotIndexCommitExists() {
+ return new SnapshotIndexCommitExistsMatcher();
+ }
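+
+ // Typical usage, as a sketch (assumes a snapshot obtained from a SnapshotDeletionPolicy):
+ //
+ // SnapshotIndexCommit commit = deletionPolicy.snapshot();
+ // assertThat(commit, snapshotIndexCommitExists());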
+}
diff --git a/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java b/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java
new file mode 100644
index 0000000..bdd72e2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public final class EngineSearcherTotalHitsMatcher extends TypeSafeMatcher<Engine.Searcher> {
+
+ private final Query query;
+
+ private final int totalHits;
+
+ public EngineSearcherTotalHitsMatcher(Query query, int totalHits) {
+ this.query = query;
+ this.totalHits = totalHits;
+ }
+
+ @Override
+ public boolean matchesSafely(Engine.Searcher searcher) {
+ try {
+ long count = Lucene.count(searcher.searcher(), query);
+ return count == totalHits;
+ } catch (IOException e) {
+ return false;
+ }
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("total hits of size ").appendValue(totalHits).appendText(" with query ").appendValue(query);
+ }
+
+ public static Matcher<Engine.Searcher> engineSearcherTotalHits(Query query, int totalHits) {
+ return new EngineSearcherTotalHitsMatcher(query, totalHits);
+ }
+
+ public static Matcher<Engine.Searcher> engineSearcherTotalHits(int totalHits) {
+ return new EngineSearcherTotalHitsMatcher(Queries.newMatchAllQuery(), totalHits);
+ }
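+
+ // Typical usage, as a sketch (mirrors how the engine tests acquire and release searchers):
+ //
+ // Engine.Searcher searcher = engine.acquireSearcher("test");
+ // assertThat(searcher, engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ // searcher.release();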
+}
diff --git a/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineIntegrationTest.java b/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineIntegrationTest.java
new file mode 100644
index 0000000..ee0ccc8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineIntegrationTest.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine.internal;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.admin.indices.segments.IndexSegments;
+import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.elasticsearch.action.admin.indices.segments.ShardSegments;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.BloomFilter;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.engine.Segment;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Collection;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+
+public class InternalEngineIntegrationTest extends ElasticsearchIntegrationTest {
+
+ @Test
+ @Slow
+ public void testSettingLoadBloomFilterDefaultTrue() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.builder().put("number_of_replicas", 0).put("number_of_shards", 1)).get();
+ client().prepareIndex("test", "foo").setSource("field", "foo").get();
+ ensureGreen();
+ refresh();
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+ final long segmentsMemoryWithBloom = stats.getTotal().getSegments().getMemoryInBytes();
+ logger.info("segments with bloom: {}", segmentsMemoryWithBloom);
+
+ logger.info("updating the setting to unload bloom filters");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put(CodecService.INDEX_CODEC_BLOOM_LOAD, false)).get();
+ logger.info("waiting for memory to match without blooms");
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+ long segmentsMemoryWithoutBloom = stats.getTotal().getSegments().getMemoryInBytes();
+ logger.info("trying segments without bloom: {}", segmentsMemoryWithoutBloom);
+ return segmentsMemoryWithoutBloom == (segmentsMemoryWithBloom - BloomFilter.Factory.DEFAULT.createFilter(1).getSizeInBytes());
+ }
+ });
+
+ logger.info("updating the setting to load bloom filters");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put(CodecService.INDEX_CODEC_BLOOM_LOAD, true)).get();
+ logger.info("waiting for memory to match with blooms");
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+ long newSegmentsMemoryWithBloom = stats.getTotal().getSegments().getMemoryInBytes();
+ logger.info("trying segments with bloom: {}", newSegmentsMemoryWithBloom);
+ return newSegmentsMemoryWithBloom == segmentsMemoryWithBloom;
+ }
+ });
+ }
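+
+ // The predicate above is pure arithmetic: with a single one-document segment, unloading
+ // blooms should shrink segment memory by exactly the size of one default bloom filter
+ // sized for a single key, i.e.
+ // expectedWithoutBloom = segmentsMemoryWithBloom
+ // - BloomFilter.Factory.DEFAULT.createFilter(1).getSizeInBytes();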
+
+ @Test
+ @Slow
+ public void testSettingLoadBloomFilterDefaultFalse() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.builder().put("number_of_replicas", 0).put("number_of_shards", 1).put(CodecService.INDEX_CODEC_BLOOM_LOAD, false)).get();
+ client().prepareIndex("test", "foo").setSource("field", "foo").get();
+ ensureGreen();
+ refresh();
+
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+ final long segmentsMemoryWithoutBloom = stats.getTotal().getSegments().getMemoryInBytes();
+ logger.info("segments without bloom: {}", segmentsMemoryWithoutBloom);
+
+ logger.info("updating the setting to load bloom filters");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put(CodecService.INDEX_CODEC_BLOOM_LOAD, true)).get();
+ logger.info("waiting for memory to match with blooms");
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+ long segmentsMemoryWithBloom = stats.getTotal().getSegments().getMemoryInBytes();
+ logger.info("trying segments with bloom: {}", segmentsMemoryWithoutBloom);
+ return segmentsMemoryWithoutBloom == (segmentsMemoryWithBloom - BloomFilter.Factory.DEFAULT.createFilter(1).getSizeInBytes());
+ }
+ });
+
+ logger.info("updating the setting to unload bloom filters");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put(CodecService.INDEX_CODEC_BLOOM_LOAD, false)).get();
+ logger.info("waiting for memory to match without blooms");
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+ long newSegmentsMemoryWithoutBloom = stats.getTotal().getSegments().getMemoryInBytes();
+ logger.info("trying segments without bloom: {}", newSegmentsMemoryWithoutBloom);
+ return newSegmentsMemoryWithoutBloom == segmentsMemoryWithoutBloom;
+ }
+ });
+ }
+
+ @Test
+ public void testSetIndexCompoundOnFlush() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.builder().put("number_of_replicas", 0).put("number_of_shards", 1)).get();
+ client().prepareIndex("test", "foo").setSource("field", "foo").get();
+ refresh();
+ assertTotalCompoundSegments(1, 1, "test");
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.builder().put(InternalEngine.INDEX_COMPOUND_ON_FLUSH, false)).get();
+ client().prepareIndex("test", "foo").setSource("field", "foo").get();
+ refresh();
+ assertTotalCompoundSegments(1, 2, "test");
+
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.builder().put(InternalEngine.INDEX_COMPOUND_ON_FLUSH, true)).get();
+ client().prepareIndex("test", "foo").setSource("field", "foo").get();
+ refresh();
+ assertTotalCompoundSegments(2, 3, "test");
+ }
+
+ private void assertTotalCompoundSegments(int expectedCompound, int expectedTotal, String index) {
+ IndicesSegmentResponse indicesSegmentResponse = client().admin().indices().prepareSegments(index).get();
+ IndexSegments indexSegments = indicesSegmentResponse.getIndices().get(index);
+ Collection<IndexShardSegments> values = indexSegments.getShards().values();
+ int compounds = 0;
+ int total = 0;
+ for (IndexShardSegments indexShardSegments : values) {
+ for (ShardSegments s : indexShardSegments) {
+ for (Segment segment : s) {
+ if (segment.isSearch() && segment.getNumDocs() > 0) {
+ if (segment.isCompound()) {
+ compounds++;
+ }
+ total++;
+ }
+ }
+ }
+ }
+ assertThat(compounds, Matchers.equalTo(expectedCompound));
+ assertThat(total, Matchers.equalTo(expectedTotal));
+ }
+
+ @Test
+ public void test4093() {
+ cluster().ensureAtMostNumNodes(1);
+ assertAcked(prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.store.type", "memory")
+ .put("cache.memory.large_cache_size", new ByteSizeValue(1, ByteSizeUnit.MB)) // no need to cache a lot
+ .put("index.number_of_shards", "1")
+ .put("index.number_of_replicas", "0")
+ .put("gateway.type", "none")
+ .put(InternalEngine.INDEX_COMPOUND_ON_FLUSH, randomBoolean())
+ .put("index.warmer.enabled", false)
+ .build()).get());
+ NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().setJvm(true).get();
+ NodeInfo[] nodes = nodeInfos.getNodes();
+ for (NodeInfo info : nodes) {
+ ByteSizeValue directMemoryMax = info.getJvm().getMem().getDirectMemoryMax();
+ logger.debug(" --> JVM max direct memory for node [{}] is set to [{}]", info.getNode().getName(), directMemoryMax);
+ }
+ final int numDocs = between(30, 50); // 30 docs are enough to fail without the fix for #4093
+ logger.debug(" --> Indexing [{}] documents", numDocs);
+ for (int i = 0; i < numDocs; i++) {
+ if ((i + 1) % 10 == 0) {
+ logger.debug(" --> Indexed [{}] documents", i + 1);
+ OptimizeResponse actionGet = client().admin().indices().prepareOptimize().setMaxNumSegments(1).execute().actionGet();
+ assertNoFailures(actionGet);
+ }
+ client().prepareIndex("test", "type1")
+ .setSource("a", "" + i)
+ .setRefresh(true)
+ .execute()
+ .actionGet();
+ }
+ logger.debug(" --> Done indexing [{}] documents", numDocs);
+ assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).get(), numDocs);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java b/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java
new file mode 100644
index 0000000..0d99093
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java
@@ -0,0 +1,1162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine.internal;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.IndexDeletionPolicy;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.TermQuery;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy;
+import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
+import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommitExistsMatcher;
+import org.elasticsearch.index.engine.*;
+import org.elasticsearch.index.indexing.ShardIndexingService;
+import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.merge.OnGoingMerge;
+import org.elasticsearch.index.merge.policy.LogByteSizeMergePolicyProvider;
+import org.elasticsearch.index.merge.policy.MergePolicyProvider;
+import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
+import org.elasticsearch.index.merge.scheduler.SerialMergeSchedulerProvider;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.store.distributor.LeastUsedDistributor;
+import org.elasticsearch.index.store.ram.RamDirectoryService;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogSizeMatcher;
+import org.elasticsearch.index.translog.fs.FsTranslog;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.hamcrest.MatcherAssert;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class InternalEngineTests extends ElasticsearchTestCase {
+
+ protected final ShardId shardId = new ShardId(new Index("index"), 1);
+
+ protected ThreadPool threadPool;
+
+ private Store store;
+ private Store storeReplica;
+
+ protected Engine engine;
+ protected Engine replicaEngine;
+
+ private IndexSettingsService engineSettingsService;
+
+ private IndexSettingsService replicaSettingsService;
+
+ private Settings defaultSettings;
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ defaultSettings = ImmutableSettings.builder()
+ .put(InternalEngine.INDEX_COMPOUND_ON_FLUSH, getRandom().nextBoolean())
+ .build(); // TODO randomize more settings
+ threadPool = new ThreadPool();
+ store = createStore();
+ store.deleteContent();
+ storeReplica = createStoreReplica();
+ storeReplica.deleteContent();
+ engineSettingsService = new IndexSettingsService(shardId.index(), EMPTY_SETTINGS);
+ engine = createEngine(engineSettingsService, store, createTranslog());
+ engine.start();
+ replicaSettingsService = new IndexSettingsService(shardId.index(), EMPTY_SETTINGS);
+ replicaEngine = createEngine(replicaSettingsService, storeReplica, createTranslogReplica());
+ replicaEngine.start();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ replicaEngine.close();
+ storeReplica.close();
+
+ engine.close();
+ store.close();
+
+ if (threadPool != null) {
+ threadPool.shutdownNow();
+ }
+ }
+
+ private Document testDocumentWithTextField() {
+ Document document = testDocument();
+ document.add(new TextField("value", "test", Field.Store.YES));
+ return document;
+ }
+
+ private Document testDocument() {
+ return new Document();
+ }
+
+ private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, long timestamp, long ttl, Document document, Analyzer analyzer, BytesReference source, boolean mappingsModified) {
+ Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE);
+ Field versionField = new NumericDocValuesField("_version", 0);
+ document.add(uidField);
+ document.add(versionField);
+ return new ParsedDocument(uidField, versionField, id, type, routing, timestamp, ttl, Arrays.asList(document), analyzer, source, mappingsModified);
+ }
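+
+ // The helper above wires in the two metadata fields the engine expects on every
+ // document: a "_uid" field used for addressing and a numeric doc-values "_version"
+ // field that backs version tracking.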
+
+ protected Store createStore() throws IOException {
+ DirectoryService directoryService = new RamDirectoryService(shardId, EMPTY_SETTINGS);
+ return new Store(shardId, EMPTY_SETTINGS, null, null, directoryService, new LeastUsedDistributor(directoryService));
+ }
+
+ protected Store createStoreReplica() throws IOException {
+ DirectoryService directoryService = new RamDirectoryService(shardId, EMPTY_SETTINGS);
+ return new Store(shardId, EMPTY_SETTINGS, null, null, directoryService, new LeastUsedDistributor(directoryService));
+ }
+
+ protected Translog createTranslog() {
+ return new FsTranslog(shardId, EMPTY_SETTINGS, new File("work/fs-translog/primary"));
+ }
+
+ protected Translog createTranslogReplica() {
+ return new FsTranslog(shardId, EMPTY_SETTINGS, new File("work/fs-translog/replica"));
+ }
+
+ protected IndexDeletionPolicy createIndexDeletionPolicy() {
+ return new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS);
+ }
+
+ protected SnapshotDeletionPolicy createSnapshotDeletionPolicy() {
+ return new SnapshotDeletionPolicy(createIndexDeletionPolicy());
+ }
+
+ protected MergePolicyProvider<?> createMergePolicy() {
+ return new LogByteSizeMergePolicyProvider(store, new IndexSettingsService(new Index("test"), EMPTY_SETTINGS));
+ }
+
+ protected MergeSchedulerProvider createMergeScheduler() {
+ return new SerialMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool);
+ }
+
+ protected Engine createEngine(IndexSettingsService indexSettingsService, Store store, Translog translog) {
+ return createEngine(indexSettingsService, store, translog, createMergeScheduler());
+ }
+
+ protected Engine createEngine(IndexSettingsService indexSettingsService, Store store, Translog translog, MergeSchedulerProvider mergeSchedulerProvider) {
+ return new InternalEngine(shardId, defaultSettings, threadPool, indexSettingsService, new ShardIndexingService(shardId, EMPTY_SETTINGS, new ShardSlowLogIndexingService(shardId, EMPTY_SETTINGS, indexSettingsService)), null, store, createSnapshotDeletionPolicy(), translog, createMergePolicy(), mergeSchedulerProvider,
+ new AnalysisService(shardId.index()), new SimilarityService(shardId.index()), new CodecService(shardId.index()));
+ }
+
+ protected static final BytesReference B_1 = new BytesArray(new byte[]{1});
+ protected static final BytesReference B_2 = new BytesArray(new byte[]{2});
+ protected static final BytesReference B_3 = new BytesArray(new byte[]{3});
+
+ @Test
+ public void testSegments() throws Exception {
+ List<Segment> segments = engine.segments();
+ assertThat(segments.isEmpty(), equalTo(true));
+ assertThat(engine.segmentsStats().getCount(), equalTo(0l));
+ final boolean defaultCompound = defaultSettings.getAsBoolean(InternalEngine.INDEX_COMPOUND_ON_FLUSH, true);
+
+ // create a doc and refresh
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+
+ ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_2, false);
+ engine.create(new Engine.Create(null, newUid("2"), doc2));
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ segments = engine.segments();
+ assertThat(segments.size(), equalTo(1));
+ assertThat(engine.segmentsStats().getCount(), equalTo(1l));
+ assertThat(segments.get(0).isCommitted(), equalTo(false));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ engine.flush(new Engine.Flush());
+
+ segments = engine.segments();
+ assertThat(segments.size(), equalTo(1));
+ assertThat(engine.segmentsStats().getCount(), equalTo(1l));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ engineSettingsService.refreshSettings(ImmutableSettings.builder().put(InternalEngine.INDEX_COMPOUND_ON_FLUSH, false).build());
+
+ ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_3, false);
+ engine.create(new Engine.Create(null, newUid("3"), doc3));
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ segments = engine.segments();
+ assertThat(segments.size(), equalTo(2));
+ assertThat(engine.segmentsStats().getCount(), equalTo(2l));
+ assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ assertThat(segments.get(1).isCommitted(), equalTo(false));
+ assertThat(segments.get(1).isSearch(), equalTo(true));
+ assertThat(segments.get(1).getNumDocs(), equalTo(1));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(1).isCompound(), equalTo(false));
+
+ engine.delete(new Engine.Delete("test", "1", newUid("1")));
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ segments = engine.segments();
+ assertThat(segments.size(), equalTo(2));
+ assertThat(engine.segmentsStats().getCount(), equalTo(2l));
+ assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(1));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(1));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ assertThat(segments.get(1).isCommitted(), equalTo(false));
+ assertThat(segments.get(1).isSearch(), equalTo(true));
+ assertThat(segments.get(1).getNumDocs(), equalTo(1));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(1).isCompound(), equalTo(false));
+
+ engineSettingsService.refreshSettings(ImmutableSettings.builder().put(InternalEngine.INDEX_COMPOUND_ON_FLUSH, true).build());
+ ParsedDocument doc4 = testParsedDocument("4", "4", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_3, false);
+ engine.create(new Engine.Create(null, newUid("4"), doc4));
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ segments = engine.segments();
+ assertThat(segments.size(), equalTo(3));
+ assertThat(engine.segmentsStats().getCount(), equalTo(3l));
+ assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(1));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(1));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ assertThat(segments.get(1).isCommitted(), equalTo(false));
+ assertThat(segments.get(1).isSearch(), equalTo(true));
+ assertThat(segments.get(1).getNumDocs(), equalTo(1));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(1).isCompound(), equalTo(false));
+
+ assertThat(segments.get(2).isCommitted(), equalTo(false));
+ assertThat(segments.get(2).isSearch(), equalTo(true));
+ assertThat(segments.get(2).getNumDocs(), equalTo(1));
+ assertThat(segments.get(2).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(2).isCompound(), equalTo(true));
+ }
+
+ @Test
+ public void testSegmentsWithMergeFlag() throws Exception {
+ ConcurrentMergeSchedulerProvider mergeSchedulerProvider = new ConcurrentMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool);
+ final AtomicReference<CountDownLatch> waitTillMerge = new AtomicReference<CountDownLatch>();
+ final AtomicReference<CountDownLatch> waitForMerge = new AtomicReference<CountDownLatch>();
+ mergeSchedulerProvider.addListener(new MergeSchedulerProvider.Listener() {
+ @Override
+ public void beforeMerge(OnGoingMerge merge) {
+ try {
+ if (waitTillMerge.get() != null) {
+ waitTillMerge.get().countDown();
+ }
+ if (waitForMerge.get() != null) {
+ waitForMerge.get().await();
+ }
+ } catch (InterruptedException e) {
+ throw ExceptionsHelper.convertToRuntime(e);
+ }
+ }
+
+ @Override
+ public void afterMerge(OnGoingMerge merge) {
+ }
+ });
+
+ Engine engine = createEngine(engineSettingsService, store, createTranslog(), mergeSchedulerProvider);
+ engine.start();
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ engine.flush(new Engine.Flush());
+ assertThat(engine.segments().size(), equalTo(1));
+ index = new Engine.Index(null, newUid("2"), doc);
+ engine.index(index);
+ engine.flush(new Engine.Flush());
+ assertThat(engine.segments().size(), equalTo(2));
+ for (Segment segment : engine.segments()) {
+ assertThat(segment.getMergeId(), nullValue());
+ }
+ index = new Engine.Index(null, newUid("3"), doc);
+ engine.index(index);
+ engine.flush(new Engine.Flush());
+ assertThat(engine.segments().size(), equalTo(3));
+ for (Segment segment : engine.segments()) {
+ assertThat(segment.getMergeId(), nullValue());
+ }
+
+ waitTillMerge.set(new CountDownLatch(1));
+ waitForMerge.set(new CountDownLatch(1));
+ engine.optimize(new Engine.Optimize().maxNumSegments(1).waitForMerge(false));
+ waitTillMerge.get().await();
+
+ for (Segment segment : engine.segments()) {
+ assertThat(segment.getMergeId(), notNullValue());
+ }
+
+ waitForMerge.get().countDown();
+
+ index = new Engine.Index(null, newUid("4"), doc);
+ engine.index(index);
+ engine.flush(new Engine.Flush());
+
+ // now, optimize and wait for merges, see that we have no merge flag
+ engine.optimize(new Engine.Optimize().flush(true).maxNumSegments(1).waitForMerge(true));
+
+ for (Segment segment : engine.segments()) {
+ assertThat(segment.getMergeId(), nullValue());
+ }
+
+ engine.close();
+ }
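+
+ // The two latches above form a simple merge gate; the pattern, as a sketch:
+ //
+ // waitTillMerge.set(new CountDownLatch(1)); // merge thread signals "merge started"
+ // waitForMerge.set(new CountDownLatch(1)); // test thread releases the merge
+ // engine.optimize(new Engine.Optimize().maxNumSegments(1).waitForMerge(false));
+ // waitTillMerge.get().await(); // observe the in-flight state (getMergeId() != null)
+ // waitForMerge.get().countDown(); // let the merge finish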
+
+ @Test
+ public void testSimpleOperations() throws Exception {
+ Engine.Searcher searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ searchResult.release();
+
+ // create a document
+ Document document = testDocumentWithTextField();
+ document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+
+ // it's not there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.release();
+
+ // but, we can still get it (in realtime)
+ Engine.GetResult getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.source().source.toBytesArray(), equalTo(B_1.toBytesArray()));
+ assertThat(getResult.docIdAndVersion(), nullValue());
+ getResult.release();
+
+ // but not in non-realtime
+ getResult = engine.get(new Engine.Get(false, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+ getResult.release();
+ // refresh and it should be there
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ // now it's there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ searchResult.release();
+
+ // also in non-realtime
+ getResult = engine.get(new Engine.Get(false, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.docIdAndVersion(), notNullValue());
+ getResult.release();
+
+ // now do an update
+ document = testDocument();
+ document.add(new TextField("value", "test1", Field.Store.YES));
+ document.add(new Field(SourceFieldMapper.NAME, B_2.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, Lucene.STANDARD_ANALYZER, B_2, false);
+ engine.index(new Engine.Index(null, newUid("1"), doc));
+
+ // it's not updated yet...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.release();
+
+ // but, we can still get it (in realtime)
+ getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.source().source.toBytesArray(), equalTo(B_2.toBytesArray()));
+ assertThat(getResult.docIdAndVersion(), nullValue());
+ getResult.release();
+
+ // refresh and it should be updated
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.release();
+
+ // now delete
+ engine.delete(new Engine.Delete("test", "1", newUid("1")));
+
+ // it's not deleted yet
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.release();
+
+ // but, get should not see it (in realtime)
+ getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+ getResult.release();
+
+ // refresh and it should be deleted
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.release();
+
+ // add it back
+ document = testDocumentWithTextField();
+ document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+
+ // it's not there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.release();
+
+ // refresh and it should be there
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ // now it's there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.release();
+
+ // now flush
+ engine.flush(new Engine.Flush());
+
+ // and, verify get (in real time)
+ getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.source(), nullValue());
+ assertThat(getResult.docIdAndVersion(), notNullValue());
+ getResult.release();
+
+ // make sure we can still work with the engine
+ // now do an update
+ document = testDocument();
+ document.add(new TextField("value", "test1", Field.Store.YES));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.index(new Engine.Index(null, newUid("1"), doc));
+
+ // it's not updated yet...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.release();
+
+ // refresh and it should be updated
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.release();
+
+ engine.close();
+ }
+
+ @Test
+ public void testSearchResultRelease() throws Exception {
+ Engine.Searcher searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ searchResult.release();
+
+ // create a document
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+
+ // it's not there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.release();
+
+ // refresh and it should be there
+ engine.refresh(new Engine.Refresh("test").force(false));
+
+ // now it's there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ // don't release the search result yet...
+
+ // delete, refresh and do a new search, it should not be there
+ engine.delete(new Engine.Delete("test", "1", newUid("1")));
+ engine.refresh(new Engine.Refresh("test").force(false));
+ Engine.Searcher updateSearchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(updateSearchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ updateSearchResult.release();
+
+ // the non-released search result should not see the delete yet...
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ searchResult.release();
+ }
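+
+ // A sketch of the point-in-time contract the test relies on: a searcher acquired
+ // before a delete keeps serving the old view until it is released.
+ //
+ // Engine.Searcher before = engine.acquireSearcher("test");
+ // engine.delete(new Engine.Delete("test", "1", newUid("1")));
+ // engine.refresh(new Engine.Refresh("test").force(false));
+ // MatcherAssert.assertThat(before, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ // before.release();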
+
+ @Test
+ public void testSimpleSnapshot() throws Exception {
+ // create a document
+ ParsedDocument doc1 = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc1));
+
+ final ExecutorService executorService = Executors.newCachedThreadPool();
+
+ engine.snapshot(new Engine.SnapshotHandler<Void>() {
+ @Override
+ public Void snapshot(final SnapshotIndexCommit snapshotIndexCommit1, final Translog.Snapshot translogSnapshot1) {
+ MatcherAssert.assertThat(snapshotIndexCommit1, SnapshotIndexCommitExistsMatcher.snapshotIndexCommitExists());
+ assertThat(translogSnapshot1.hasNext(), equalTo(true));
+ Translog.Create create1 = (Translog.Create) translogSnapshot1.next();
+ assertThat(create1.source().toBytesArray(), equalTo(B_1.toBytesArray()));
+ assertThat(translogSnapshot1.hasNext(), equalTo(false));
+
+ Future<Object> future = executorService.submit(new Callable<Object>() {
+ @Override
+ public Object call() throws Exception {
+ engine.flush(new Engine.Flush());
+ ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_2, false);
+ engine.create(new Engine.Create(null, newUid("2"), doc2));
+ engine.flush(new Engine.Flush());
+ ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_3, false);
+ engine.create(new Engine.Create(null, newUid("3"), doc3));
+ return null;
+ }
+ });
+
+ try {
+ future.get();
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail(e.getMessage());
+ }
+
+ MatcherAssert.assertThat(snapshotIndexCommit1, SnapshotIndexCommitExistsMatcher.snapshotIndexCommitExists());
+
+ engine.snapshot(new Engine.SnapshotHandler<Void>() {
+ @Override
+ public Void snapshot(SnapshotIndexCommit snapshotIndexCommit2, Translog.Snapshot translogSnapshot2) throws EngineException {
+ MatcherAssert.assertThat(snapshotIndexCommit1, SnapshotIndexCommitExistsMatcher.snapshotIndexCommitExists());
+ MatcherAssert.assertThat(snapshotIndexCommit2, SnapshotIndexCommitExistsMatcher.snapshotIndexCommitExists());
+ assertThat(snapshotIndexCommit2.getSegmentsFileName(), not(equalTo(snapshotIndexCommit1.getSegmentsFileName())));
+ assertThat(translogSnapshot2.hasNext(), equalTo(true));
+ Translog.Create create3 = (Translog.Create) translogSnapshot2.next();
+ assertThat(create3.source().toBytesArray(), equalTo(B_3.toBytesArray()));
+ assertThat(translogSnapshot2.hasNext(), equalTo(false));
+ return null;
+ }
+ });
+ return null;
+ }
+ });
+
+ engine.close();
+ }
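+
+ // The nested snapshot above pins a second, newer commit while the first is still held;
+ // both must keep resolving on disk, which is what the two snapshotIndexCommitExists()
+ // assertions verify, and the differing segments file names prove they are distinct commits.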
+
+ @Test
+ public void testSimpleRecover() throws Exception {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+ engine.flush(new Engine.Flush());
+
+ engine.recover(new Engine.RecoveryHandler() {
+ @Override
+ public void phase1(SnapshotIndexCommit snapshot) throws EngineException {
+ try {
+ engine.flush(new Engine.Flush());
+ assertThat("flush is not allowed in phase 3", false, equalTo(true));
+ } catch (FlushNotAllowedEngineException e) {
+ // all is well
+ }
+ }
+
+ @Override
+ public void phase2(Translog.Snapshot snapshot) throws EngineException {
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ try {
+ engine.flush(new Engine.Flush());
+ assertThat("flush is not allowed in phase 3", false, equalTo(true));
+ } catch (FlushNotAllowedEngineException e) {
+ // all is well
+ }
+ }
+
+ @Override
+ public void phase3(Translog.Snapshot snapshot) throws EngineException {
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ try {
+ // we can do this here since we are on the same thread
+ engine.flush(new Engine.Flush());
+ assertThat("flush is not allowed in phase 3", false, equalTo(true));
+ } catch (FlushNotAllowedEngineException e) {
+ // all is well
+ }
+ }
+ });
+
+ engine.flush(new Engine.Flush());
+ engine.close();
+ }
+
+ @Test
+ public void testRecoverWithOperationsBetweenPhase1AndPhase2() throws Exception {
+ ParsedDocument doc1 = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc1));
+ engine.flush(new Engine.Flush());
+ ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_2, false);
+ engine.create(new Engine.Create(null, newUid("2"), doc2));
+
+ engine.recover(new Engine.RecoveryHandler() {
+ @Override
+ public void phase1(SnapshotIndexCommit snapshot) throws EngineException {
+ }
+
+ @Override
+ public void phase2(Translog.Snapshot snapshot) throws EngineException {
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Create create = (Translog.Create) snapshot.next();
+ assertThat(create.source().toBytesArray(), equalTo(B_2));
+ assertThat(snapshot.hasNext(), equalTo(false));
+ }
+
+ @Override
+ public void phase3(Translog.Snapshot snapshot) throws EngineException {
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ }
+ });
+
+ engine.flush(new Engine.Flush());
+ engine.close();
+ }
+
+ @Test
+ public void testRecoverWithOperationsBetweenPhase1AndPhase2AndPhase3() throws Exception {
+ ParsedDocument doc1 = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_1, false);
+ engine.create(new Engine.Create(null, newUid("1"), doc1));
+ engine.flush(new Engine.Flush());
+ ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_2, false);
+ engine.create(new Engine.Create(null, newUid("2"), doc2));
+
+ engine.recover(new Engine.RecoveryHandler() {
+ @Override
+ public void phase1(SnapshotIndexCommit snapshot) throws EngineException {
+ }
+
+ @Override
+ public void phase2(Translog.Snapshot snapshot) throws EngineException {
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Create create = (Translog.Create) snapshot.next();
+ assertThat(snapshot.hasNext(), equalTo(false));
+ assertThat(create.source().toBytesArray(), equalTo(B_2));
+
+ // add for phase3
+ ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_3, false);
+ engine.create(new Engine.Create(null, newUid("3"), doc3));
+ }
+
+ @Override
+ public void phase3(Translog.Snapshot snapshot) throws EngineException {
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Create create = (Translog.Create) snapshot.next();
+ assertThat(snapshot.hasNext(), equalTo(false));
+ assertThat(create.source().toBytesArray(), equalTo(B_3));
+ }
+ });
+
+ engine.flush(new Engine.Flush());
+ engine.close();
+ }
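+
+ // Recovery contract exercised by the three tests above: phase1 ships a pinned index
+ // commit, phase2 replays the translog operations that arrived after the phase1 snapshot,
+ // and phase3 replays whatever arrived while phase2 was running.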
+
+ @Test
+ public void testVersioningNewCreate() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc);
+ engine.create(create);
+ assertThat(create.version(), equalTo(1l));
+
+ create = new Engine.Create(null, newUid("1"), doc).version(create.version()).origin(REPLICA);
+ replicaEngine.create(create);
+ assertThat(create.version(), equalTo(1l));
+ }
+
+ @Test
+ public void testExternalVersioningNewCreate() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(12);
+ engine.create(create);
+ assertThat(create.version(), equalTo(12l));
+
+ create = new Engine.Create(null, newUid("1"), doc).version(create.version()).origin(REPLICA);
+ replicaEngine.create(create);
+ assertThat(create.version(), equalTo(12l));
+ }
+
+ @Test
+ public void testVersioningNewIndex() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc).version(index.version()).origin(REPLICA);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(1l));
+ }
+
+ @Test
+ public void testExternalVersioningNewIndex() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(12);
+ engine.index(index);
+ assertThat(index.version(), equalTo(12l));
+
+ index = new Engine.Index(null, newUid("1"), doc).version(index.version()).origin(REPLICA);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(12l));
+ }
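+
+ // Contrast exercised by the versioning tests: internal versioning starts at 1 and
+ // increments on every write, while VersionType.EXTERNAL stores the caller-supplied
+ // version as long as it is greater than the current one; a sketch:
+ //
+ // engine.index(new Engine.Index(null, newUid("1"), doc)
+ // .versionType(VersionType.EXTERNAL).version(12)); // stored version becomes 12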
+
+ @Test
+ public void testVersioningIndexConflict() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ index = new Engine.Index(null, newUid("1"), doc).version(1l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // future versions should not work either
+ index = new Engine.Index(null, newUid("1"), doc).version(3l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testExternalVersioningIndexConflict() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(12);
+ engine.index(index);
+ assertThat(index.version(), equalTo(12l));
+
+ index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(14);
+ engine.index(index);
+ assertThat(index.version(), equalTo(14l));
+
+ index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(13l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testVersioningIndexConflictWithFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ engine.flush(new Engine.Flush());
+
+ index = new Engine.Index(null, newUid("1"), doc).version(1l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // future versions should not work either
+ index = new Engine.Index(null, newUid("1"), doc).version(3l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testExternalVersioningIndexConflictWithFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(12);
+ engine.index(index);
+ assertThat(index.version(), equalTo(12l));
+
+ index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(14);
+ engine.index(index);
+ assertThat(index.version(), equalTo(14l));
+
+ engine.flush(new Engine.Flush());
+
+ index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(13);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testVersioningDeleteConflict() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ Engine.Delete delete = new Engine.Delete("test", "1", newUid("1")).version(1l);
+ try {
+ engine.delete(delete);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // future versions should not work either
+ delete = new Engine.Delete("test", "1", newUid("1")).version(3l);
+ try {
+ engine.delete(delete);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // now actually delete
+ delete = new Engine.Delete("test", "1", newUid("1")).version(2l);
+ engine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+
+ // make sure we cannot index into the deleted doc with a stale version
+ index = new Engine.Index(null, newUid("1"), doc).version(2l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+        // we shouldn't be able to create with a stale version either
+        Engine.Create create = new Engine.Create(null, newUid("1"), doc).version(2l);
+        try {
+            engine.create(create);
+            fail();
+        } catch (VersionConflictEngineException e) {
+            // all is well
+        }
+ }
+
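+    // Same delete-conflict scenario with flushes between the steps, so the version checks also
+    // run against committed state.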
+ @Test
+ public void testVersioningDeleteConflictWithFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ engine.flush(new Engine.Flush());
+
+ Engine.Delete delete = new Engine.Delete("test", "1", newUid("1")).version(1l);
+ try {
+ engine.delete(delete);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+        // future versions should not be accepted either
+ delete = new Engine.Delete("test", "1", newUid("1")).version(3l);
+ try {
+ engine.delete(delete);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ engine.flush(new Engine.Flush());
+
+ // now actually delete
+ delete = new Engine.Delete("test", "1", newUid("1")).version(2l);
+ engine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+
+ engine.flush(new Engine.Flush());
+
+        // now check that indexing over the deleted doc with a stale version still conflicts
+ index = new Engine.Index(null, newUid("1"), doc).version(2l);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+        // we shouldn't be able to create with a stale version either
+        Engine.Create create = new Engine.Create(null, newUid("1"), doc).version(2l);
+        try {
+            engine.create(create);
+            fail();
+        } catch (VersionConflictEngineException e) {
+            // all is well
+        }
+ }
+
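+    // A create for an already existing uid must fail with DocumentAlreadyExistsException.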
+ @Test
+ public void testVersioningCreateExistsException() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc);
+ engine.create(create);
+ assertThat(create.version(), equalTo(1l));
+
+ create = new Engine.Create(null, newUid("1"), doc);
+ try {
+ engine.create(create);
+ fail();
+ } catch (DocumentAlreadyExistsException e) {
+ // all is well
+ }
+ }
+
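+    // Same as above, with a flush so the uid lookup also runs against the committed index.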
+ @Test
+ public void testVersioningCreateExistsExceptionWithFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc);
+ engine.create(create);
+ assertThat(create.version(), equalTo(1l));
+
+ engine.flush(new Engine.Flush());
+
+ create = new Engine.Create(null, newUid("1"), doc);
+ try {
+ engine.create(create);
+ fail();
+ } catch (DocumentAlreadyExistsException e) {
+ // all is well
+ }
+ }
+
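+    // Replica operations carry the version assigned on the primary; an out-of-order (older)
+    // operation arriving on the replica must be rejected with a version conflict.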
+ @Test
+ public void testVersioningReplicaConflict1() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ // apply the second index to the replica, should work fine
+ index = new Engine.Index(null, newUid("1"), doc).version(2l).origin(REPLICA);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ // now, the old one should not work
+ index = new Engine.Index(null, newUid("1"), doc).version(1l).origin(REPLICA);
+ try {
+ replicaEngine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+        // replaying the same version on the replica may be rejected; both outcomes are tolerated here
+ try {
+ index = new Engine.Index(null, newUid("1"), doc).version(2l).origin(REPLICA);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(2l));
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
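+    // Out-of-order replication including a delete: the delete (version 3) is applied before the
+    // second index (version 2), which must then be rejected on the replica.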
+ @Test
+ public void testVersioningReplicaConflict2() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ // apply the first index to the replica, should work fine
+ index = new Engine.Index(null, newUid("1"), doc).version(1l).origin(REPLICA);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ // index it again
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ // now delete it
+ Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"));
+ engine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+
+ // apply the delete on the replica (skipping the second index)
+ delete = new Engine.Delete("test", "1", newUid("1")).version(3l).origin(REPLICA);
+ replicaEngine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+
+        // a second delete with the same version may be rejected; both outcomes are tolerated here
+ try {
+ delete = new Engine.Delete("test", "1", newUid("1")).version(3l).origin(REPLICA);
+ replicaEngine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+        // the stale second index arriving after the delete may be rejected; both outcomes are tolerated here
+ try {
+ index = new Engine.Index(null, newUid("1"), doc).version(2l).origin(REPLICA);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(2l));
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
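+    // created() reports whether an index operation inserted a new doc (true) or updated an
+    // existing one (false); deleting the doc makes the next index a create again.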
+ @Test
+ public void testBasicCreatedFlag() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertTrue(index.created());
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertFalse(index.created());
+
+ engine.delete(new Engine.Delete(null, "1", newUid("1")));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertTrue(index.created());
+ }
+
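+    // The created flag must also be correct when the preceding delete is only visible through
+    // the committed index, i.e. after a flush.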
+ @Test
+ public void testCreatedFlagAfterFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertTrue(index.created());
+
+ engine.delete(new Engine.Delete(null, "1", newUid("1")));
+
+ engine.flush(new Engine.Flush());
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertTrue(index.created());
+ }
+
+ protected Term newUid(String id) {
+ return new Term("_uid", id);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTests.java b/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTests.java
new file mode 100644
index 0000000..b7efdbb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTests.java
@@ -0,0 +1,358 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.lucene.HashedBytesRef;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+public abstract class AbstractFieldDataImplTests extends AbstractFieldDataTests {
+
+ protected String one() {
+ return "1";
+ }
+
+ protected String two() {
+ return "2";
+ }
+
+ protected String three() {
+ return "3";
+ }
+
+ protected String four() {
+ return "4";
+ }
+
+ protected String toString(Object value) {
+ if (value instanceof BytesRef) {
+ return ((BytesRef) value).utf8ToString();
+ }
+ return value.toString();
+ }
+
+ protected abstract void fillSingleValueAllSet() throws Exception;
+
+ protected abstract void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception;
+
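+    // Field data is loaded per segment and covers deleted doc ids too; every slot should still
+    // expose at least one value.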
+ @Test
+ public void testDeletedDocs() throws Exception {
+ add2SingleValuedDocumentsAndDeleteOneOfThem();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicReaderContext readerContext = refreshReader();
+ AtomicFieldData fieldData = indexFieldData.load(readerContext);
+ BytesValues values = fieldData.getBytesValues(randomBoolean());
+ for (int i = 0; i < fieldData.getNumDocs(); ++i) {
+ assertThat(values.setDocument(i), greaterThanOrEqualTo(1));
+ }
+ }
+
+ @Test
+ public void testSingleValueAllSet() throws Exception {
+ fillSingleValueAllSet();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicReaderContext readerContext = refreshReader();
+ AtomicFieldData fieldData = indexFieldData.load(readerContext);
+ assertThat(fieldData.getMemorySizeInBytes(), greaterThan(0l));
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(bytesValues.isMultiValued(), equalTo(false));
+
+ assertThat(bytesValues.setDocument(0), equalTo(1));
+ assertThat(bytesValues.nextValue(), equalTo(new BytesRef(two())));
+ assertThat(bytesValues.setDocument(1), equalTo(1));
+ assertThat(bytesValues.nextValue(), equalTo(new BytesRef(one())));
+ assertThat(bytesValues.setDocument(2), equalTo(1));
+ assertThat(bytesValues.nextValue(), equalTo(new BytesRef(three())));
+
+ assertValues(bytesValues, 0, two());
+ assertValues(bytesValues, 1, one());
+ assertValues(bytesValues, 2, three());
+
+ BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(convert(hashedBytesValues, 0), equalTo(new HashedBytesRef(two())));
+ assertThat(convert(hashedBytesValues, 1), equalTo(new HashedBytesRef(one())));
+ assertThat(convert(hashedBytesValues, 2), equalTo(new HashedBytesRef(three())));
+ assertHashedValues(hashedBytesValues, 0, two());
+ assertHashedValues(hashedBytesValues, 1, one());
+ assertHashedValues(hashedBytesValues, 2, three());
+
+ IndexSearcher searcher = new IndexSearcher(readerContext.reader());
+ TopFieldDocs topDocs;
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(toString(((FieldDoc) topDocs.scoreDocs[0]).fields[0]), equalTo(one()));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(toString(((FieldDoc) topDocs.scoreDocs[1]).fields[0]), equalTo(two()));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+ assertThat(toString(((FieldDoc) topDocs.scoreDocs[2]).fields[0]), equalTo(three()));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+ }
+
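+    // Helper: pull the first value (and its hash) for a doc into a HashedBytesRef,
+    // or an empty one if the doc has no values.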
+ private HashedBytesRef convert(BytesValues values, int doc) {
+ if (values.setDocument(doc) > 0) {
+ return new HashedBytesRef(BytesRef.deepCopyOf(values.nextValue()), values.currentValueHash());
+ } else {
+ return new HashedBytesRef(new BytesRef());
+ }
+ }
+
+ protected abstract void fillSingleValueWithMissing() throws Exception;
+
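+    // Assertion helpers: verify that a doc exposes exactly the expected values (and hashes), in order.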
+    public void assertValues(BytesValues values, int docId, BytesRef... expectedValues) {
+        assertThat(values.setDocument(docId), equalTo(expectedValues.length));
+        for (int i = 0; i < expectedValues.length; i++) {
+            assertThat(values.nextValue(), equalTo(expectedValues[i]));
+        }
+    }
+
+    public void assertValues(BytesValues values, int docId, String... expectedValues) {
+        assertThat(values.setDocument(docId), equalTo(expectedValues.length));
+        for (int i = 0; i < expectedValues.length; i++) {
+            assertThat(values.nextValue(), equalTo(new BytesRef(expectedValues[i])));
+        }
+    }
+
+    public void assertHashedValues(BytesValues values, int docId, BytesRef... expectedValues) {
+        assertThat(values.setDocument(docId), equalTo(expectedValues.length));
+        for (int i = 0; i < expectedValues.length; i++) {
+            assertThat(values.nextValue(), equalTo(new HashedBytesRef(expectedValues[i]).bytes));
+            assertThat(values.currentValueHash(), equalTo(new HashedBytesRef(expectedValues[i]).hash));
+        }
+    }
+
+    public void assertHashedValues(BytesValues values, int docId, String... expectedValues) {
+        assertThat(values.setDocument(docId), equalTo(expectedValues.length));
+        for (int i = 0; i < expectedValues.length; i++) {
+            assertThat(values.nextValue(), equalTo(new HashedBytesRef(expectedValues[i]).bytes));
+            assertThat(values.currentValueHash(), equalTo(new HashedBytesRef(expectedValues[i]).hash));
+        }
+    }
+
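+    // Doc 1 has no value: it must report zero values and convert to an empty HashedBytesRef.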
+ @Test
+ public void testSingleValueWithMissing() throws Exception {
+ fillSingleValueWithMissing();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicFieldData fieldData = indexFieldData.load(refreshReader());
+ assertThat(fieldData.getMemorySizeInBytes(), greaterThan(0l));
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+        BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(bytesValues.isMultiValued(), equalTo(false));
+
+ assertValues(bytesValues, 0, two());
+ assertValues(bytesValues, 1, Strings.EMPTY_ARRAY);
+ assertValues(bytesValues, 2, three());
+
+ BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
+ assertThat(convert(hashedBytesValues, 0), equalTo(new HashedBytesRef(two())));
+ assertThat(convert(hashedBytesValues, 1), equalTo(new HashedBytesRef(new BytesRef())));
+ assertThat(convert(hashedBytesValues, 2), equalTo(new HashedBytesRef(three())));
+
+ assertHashedValues(hashedBytesValues, 0, two());
+ assertHashedValues(hashedBytesValues, 1, Strings.EMPTY_ARRAY);
+ assertHashedValues(hashedBytesValues, 2, three());
+ }
+
+ protected abstract void fillMultiValueAllSet() throws Exception;
+
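+    // Doc 0 is multi-valued (two(), four()); values must come back in order and
+    // isMultiValued() must report true.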
+ @Test
+ public void testMultiValueAllSet() throws Exception {
+ fillMultiValueAllSet();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicFieldData fieldData = indexFieldData.load(refreshReader());
+ assertThat(fieldData.getMemorySizeInBytes(), greaterThan(0l));
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(bytesValues.isMultiValued(), equalTo(true));
+
+ assertValues(bytesValues, 0, two(), four());
+ assertValues(bytesValues, 1, one());
+ assertValues(bytesValues, 2, three());
+
+ BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(convert(hashedBytesValues, 0), equalTo(new HashedBytesRef(two())));
+ assertThat(convert(hashedBytesValues, 1), equalTo(new HashedBytesRef(one())));
+ assertThat(convert(hashedBytesValues, 2), equalTo(new HashedBytesRef(three())));
+
+ assertHashedValues(hashedBytesValues, 0, two(), four());
+
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs.length, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs.length, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+ }
+
+ protected abstract void fillMultiValueWithMissing() throws Exception;
+
+ @Test
+ public void testMultiValueWithMissing() throws Exception {
+ fillMultiValueWithMissing();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicFieldData fieldData = indexFieldData.load(refreshReader());
+ assertThat(fieldData.getMemorySizeInBytes(), greaterThan(0l));
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(bytesValues.isMultiValued(), equalTo(true));
+
+ assertValues(bytesValues, 0, two(), four());
+ assertValues(bytesValues, 1, Strings.EMPTY_ARRAY);
+
+ BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(convert(hashedBytesValues, 0), equalTo(new HashedBytesRef(two())));
+ assertThat(convert(hashedBytesValues, 1), equalTo(new HashedBytesRef(new BytesRef())));
+ assertThat(convert(hashedBytesValues, 2), equalTo(new HashedBytesRef(three())));
+
+ assertHashedValues(bytesValues, 0, two(), four());
+ assertHashedValues(bytesValues, 1, Strings.EMPTY_ARRAY);
+ assertHashedValues(bytesValues, 2, three());
+
+ assertHashedValues(hashedBytesValues, 0, two(), four());
+ assertHashedValues(hashedBytesValues, 1, Strings.EMPTY_ARRAY);
+ assertHashedValues(hashedBytesValues, 2, three());
+ }
+
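+    // With no values indexed at all, field data must still load and report zero values per doc.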
+    @Test
+    public void testMissingValueForAll() throws Exception {
+ fillAllMissing();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicFieldData fieldData = indexFieldData.load(refreshReader());
+        // Some impls (FST) report a memory size of 0 while others (PagedBytes) still take up space even when no actual data is loaded
+ assertThat(fieldData.getMemorySizeInBytes(), greaterThanOrEqualTo(0l));
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertThat(bytesValues.isMultiValued(), equalTo(false));
+
+ assertValues(bytesValues, 0, Strings.EMPTY_ARRAY);
+ assertValues(bytesValues, 1, Strings.EMPTY_ARRAY);
+ assertValues(bytesValues, 2, Strings.EMPTY_ARRAY);
+ BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
+
+ assertValues(hashedBytesValues, 0, Strings.EMPTY_ARRAY);
+ assertValues(hashedBytesValues, 1, Strings.EMPTY_ARRAY);
+ assertValues(hashedBytesValues, 2, Strings.EMPTY_ARRAY);
+ }
+
+ protected abstract void fillAllMissing() throws Exception;
+
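+    // Sorting on a multi-valued string field reduces each doc with MIN/MAX; docs without values
+    // sort last (MAX_TERM ascending, null descending).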
+ @Test
+ public void testSortMultiValuesFields() throws Exception {
+ fillExtendedMvSet();
+ IndexFieldData indexFieldData = getForField("value");
+
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("!08"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("02"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("03"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("04"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(4));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("06"));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(6));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).utf8ToString(), equalTo("08"));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+ assertThat((BytesRef) ((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(BytesRefFieldComparatorSource.MAX_TERM));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+ assertThat((BytesRef) ((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(BytesRefFieldComparatorSource.MAX_TERM));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("10"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("08"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("06"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("04"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("03"));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(7));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).utf8ToString(), equalTo("!10"));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+ assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+ assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+ }
+
+ protected abstract void fillExtendedMvSet() throws Exception;
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTests.java
new file mode 100644
index 0000000..43a0878
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTests.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.Mapper.BuilderContext;
+import org.elasticsearch.index.mapper.MapperBuilders;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+
+// we might want to cut this over to LuceneTestCase
+public abstract class AbstractFieldDataTests extends ElasticsearchTestCase {
+
+ protected IndexFieldDataService ifdService;
+ protected IndexWriter writer;
+ protected AtomicReaderContext readerContext;
+
+ protected abstract FieldDataType getFieldDataType();
+
+ protected boolean hasDocValues() {
+ return false;
+ }
+
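+    // Builds a single-field mapper of the requested type and resolves its field data
+    // through the IndexFieldDataService.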
+ public <IFD extends IndexFieldData<?>> IFD getForField(String fieldName) {
+ return getForField(getFieldDataType(), fieldName);
+ }
+
+ public <IFD extends IndexFieldData<?>> IFD getForField(FieldDataType type, String fieldName) {
+ final FieldMapper<?> mapper;
+ final BuilderContext context = new BuilderContext(null, new ContentPath(1));
+ if (type.getType().equals("string")) {
+ mapper = MapperBuilders.stringField(fieldName).tokenized(false).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("float")) {
+ mapper = MapperBuilders.floatField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("double")) {
+ mapper = MapperBuilders.doubleField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("long")) {
+ mapper = MapperBuilders.longField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("int")) {
+ mapper = MapperBuilders.integerField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("short")) {
+ mapper = MapperBuilders.shortField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("byte")) {
+ mapper = MapperBuilders.byteField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("geo_point")) {
+ mapper = MapperBuilders.geoPointField(fieldName).fieldDataSettings(type.getSettings()).build(context);
+ } else {
+ throw new UnsupportedOperationException(type.getType());
+ }
+ return ifdService.getForField(mapper);
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
+        // LogByteSizeMergePolicy to preserve doc ID order
+ writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, new StandardAnalyzer(Lucene.VERSION)).setMergePolicy(new LogByteSizeMergePolicy()));
+ }
+
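+    // (Re)opens a reader on the current writer state and wraps it as a single atomic reader,
+    // so tests can work against one AtomicReaderContext.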
+ protected AtomicReaderContext refreshReader() throws Exception {
+ if (readerContext != null) {
+ readerContext.reader().close();
+ }
+ AtomicReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(writer, true));
+ readerContext = reader.getContext();
+ return readerContext;
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ if (readerContext != null) {
+ readerContext.reader().close();
+ }
+ writer.close();
+ ifdService.clear();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/AbstractNumericFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/AbstractNumericFieldDataTests.java
new file mode 100644
index 0000000..817a7c1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/AbstractNumericFieldDataTests.java
@@ -0,0 +1,477 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.search.*;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public abstract class AbstractNumericFieldDataTests extends AbstractFieldDataImplTests {
+
+ protected abstract FieldDataType getFieldDataType();
+
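+    // Numeric mirror of testSingleValueAllSet: the long and double views must agree with the
+    // indexed values, and sorting by MIN/MAX must order the docs accordingly.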
+ @Test
+ public void testSingleValueAllSetNumber() throws Exception {
+ fillSingleValueAllSet();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ LongValues longValues = fieldData.getLongValues();
+
+ assertThat(longValues.isMultiValued(), equalTo(false));
+
+ assertThat(longValues.setDocument(0), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(2l));
+
+ assertThat(longValues.setDocument(1), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(1l));
+
+ assertThat(longValues.setDocument(2), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(3l));
+
+ DoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(doubleValues.isMultiValued(), equalTo(false));
+
+ assertThat(1, equalTo(doubleValues.setDocument(0)));
+ assertThat(doubleValues.nextValue(), equalTo(2d));
+
+ assertThat(1, equalTo(doubleValues.setDocument(1)));
+ assertThat(doubleValues.nextValue(), equalTo(1d));
+
+ assertThat(1, equalTo(doubleValues.setDocument(2)));
+ assertThat(doubleValues.nextValue(), equalTo(3d));
+
+ IndexSearcher searcher = new IndexSearcher(readerContext.reader());
+ TopFieldDocs topDocs;
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+ }
+
+ @Test
+ public void testSingleValueWithMissingNumber() throws Exception {
+ fillSingleValueWithMissing();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ LongValues longValues = fieldData.getLongValues();
+
+ assertThat(longValues.isMultiValued(), equalTo(false));
+
+ assertThat(longValues.setDocument(0), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(2l));
+
+ assertThat(longValues.setDocument(1), equalTo(0));
+
+ assertThat(longValues.setDocument(2), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(3l));
+
+ DoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(doubleValues.isMultiValued(), equalTo(false));
+
+ assertThat(1, equalTo(doubleValues.setDocument(0)));
+ assertThat(doubleValues.nextValue(), equalTo(2d));
+
+ assertThat(0, equalTo(doubleValues.setDocument(1)));
+
+ assertThat(1, equalTo(doubleValues.setDocument(2)));
+ assertThat(doubleValues.nextValue(), equalTo(3d));
+
+ IndexSearcher searcher = new IndexSearcher(readerContext.reader());
+ TopFieldDocs topDocs;
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN)))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("_first", SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("_first", SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(0));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("1", SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("1", SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+ }
+
+ @Test
+ public void testMultiValueAllSetNumber() throws Exception {
+ fillMultiValueAllSet();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ LongValues longValues = fieldData.getLongValues();
+
+ assertThat(longValues.isMultiValued(), equalTo(true));
+
+ assertThat(longValues.setDocument(0), equalTo(2));
+ assertThat(longValues.nextValue(), equalTo(2l));
+ assertThat(longValues.nextValue(), equalTo(4l));
+
+ assertThat(longValues.setDocument(1), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(1l));
+
+ assertThat(longValues.setDocument(2), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(3l));
+
+ DoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(doubleValues.isMultiValued(), equalTo(true));
+
+ assertThat(2, equalTo(doubleValues.setDocument(0)));
+ assertThat(doubleValues.nextValue(), equalTo(2d));
+ assertThat(doubleValues.nextValue(), equalTo(4d));
+
+ assertThat(1, equalTo(doubleValues.setDocument(1)));
+ assertThat(doubleValues.nextValue(), equalTo(1d));
+
+ assertThat(1, equalTo(doubleValues.setDocument(2)));
+ assertThat(doubleValues.nextValue(), equalTo(3d));
+ }
+
+ @Test
+ public void testMultiValueWithMissingNumber() throws Exception {
+ fillMultiValueWithMissing();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ LongValues longValues = fieldData.getLongValues();
+
+ assertThat(longValues.isMultiValued(), equalTo(true));
+
+ assertThat(longValues.setDocument(0), equalTo(2));
+ assertThat(longValues.nextValue(), equalTo(2l));
+ assertThat(longValues.nextValue(), equalTo(4l));
+
+ assertThat(longValues.setDocument(1), equalTo(0));
+
+ assertThat(longValues.setDocument(2), equalTo(1));
+ assertThat(longValues.nextValue(), equalTo(3l));
+
+ DoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(doubleValues.isMultiValued(), equalTo(true));
+
+ assertThat(2, equalTo(doubleValues.setDocument(0)));
+ assertThat(doubleValues.nextValue(), equalTo(2d));
+ assertThat(doubleValues.nextValue(), equalTo(4d));
+
+ assertThat(0, equalTo(doubleValues.setDocument(1)));
+
+ assertThat(1, equalTo(doubleValues.setDocument(2)));
+ assertThat(doubleValues.nextValue(), equalTo(3d));
+ }
+
+ @Test
+ public void testMissingValueForAll() throws Exception {
+ fillAllMissing();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ assertThat(fieldData.getNumDocs(), equalTo(3));
+
+ // long values
+
+ LongValues longValues = fieldData.getLongValues();
+
+ assertThat(longValues.isMultiValued(), equalTo(false));
+
+ assertThat(longValues.setDocument(0), equalTo(0));
+ assertThat(longValues.setDocument(1), equalTo(0));
+ assertThat(longValues.setDocument(2), equalTo(0));
+
+ // double values
+
+ DoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(doubleValues.isMultiValued(), equalTo(false));
+
+ assertThat(0, equalTo(doubleValues.setDocument(0)));
+
+ assertThat(0, equalTo(doubleValues.setDocument(1)));
+
+ assertThat(0, equalTo(doubleValues.setDocument(2)));
+ }
+
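+    // Three docs with only an _id field, so the "value" field has no data at all.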
+ protected void fillAllMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
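+    // Numeric multi-value sorting across MIN/MAX as well as SUM and AVG; docs 1 and 5 have no
+    // values and sort last unless an explicit missing value ("_first", "-9", "9") is given.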
+ @Test
+ public void testSortMultiValuesFields() throws Exception {
+ fillExtendedMvSet();
+ IndexFieldData indexFieldData = getForField("value");
+
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN)))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-10));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(4));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(6));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(10));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(6));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(4));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(-8));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.SUM)))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-27));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(6));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(15));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(21));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(27));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.SUM), true))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(27));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(21));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(15));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(6));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(-27));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.AVG)))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-9));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(5));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.AVG), true))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(5));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(-9));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("_first", SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(5));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(7));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(3));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(4));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(6));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("_first", SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(5));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(6));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(4));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(7));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("-9", SortMode.MIN))));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(5));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(3));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(4));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(6));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("9", SortMode.MAX), true)));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(5));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(4));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(7));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTests.java
new file mode 100644
index 0000000..a33b475
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTests.java
@@ -0,0 +1,426 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.join.FixedBitSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.OpenBitSet;
+import org.apache.lucene.util.UnicodeUtil;
+import org.apache.lucene.util._TestUtil;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.search.nested.NestedFieldComparatorSource;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ */
+public abstract class AbstractStringFieldDataTests extends AbstractFieldDataImplTests {
+
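+    // The fill* fixtures below index the small corpora that the shared tests in
+    // AbstractFieldDataImplTests run against.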
+ protected void fillSingleValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "1", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new StringField("value", "3", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new StringField("value", "4", Field.Store.NO));
+ writer.addDocument(d);
+
+ writer.commit();
+
+ writer.deleteDocuments(new Term("_id", "1"));
+ }
+
+ protected void fillSingleValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING....
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new StringField("value", "3", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void fillMultiValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "2", Field.Store.NO));
+ d.add(new StringField("value", "4", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new StringField("value", "1", Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit(); // TODO: Have tests with more docs for sorting
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new StringField("value", "3", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void fillMultiValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "2", Field.Store.NO));
+ d.add(new StringField("value", "4", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new StringField("value", "3", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void fillAllMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
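+    // Eight docs spread over three segments; docs "2" and "6" carry no values, and doc "8" uses
+    // '!'-prefixed terms that sort before the digit terms.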
+ protected void fillExtendedMvSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new StringField("value", "02", Field.Store.NO));
+ d.add(new StringField("value", "04", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new StringField("value", "03", Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "4", Field.Store.NO));
+ d.add(new StringField("value", "04", Field.Store.NO));
+ d.add(new StringField("value", "05", Field.Store.NO));
+ d.add(new StringField("value", "06", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "5", Field.Store.NO));
+ d.add(new StringField("value", "06", Field.Store.NO));
+ d.add(new StringField("value", "07", Field.Store.NO));
+ d.add(new StringField("value", "08", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "6", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "7", Field.Store.NO));
+ d.add(new StringField("value", "08", Field.Store.NO));
+ d.add(new StringField("value", "09", Field.Store.NO));
+ d.add(new StringField("value", "10", Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
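+        // '!' sorts before any ASCII digit, so this last document carries the
+        // lexicographically smallest values of the whole multi-valued set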
+ d = new Document();
+ d.add(new StringField("_id", "8", Field.Store.NO));
+ d.add(new StringField("value", "!08", Field.Store.NO));
+ d.add(new StringField("value", "!09", Field.Store.NO));
+ d.add(new StringField("value", "!10", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Repeat(iterations=10)
+ public void testActualMissingValue() throws IOException {
+ testActualMissingValue(false);
+ }
+
+ @Repeat(iterations=10)
+ public void testActualMissingValueReverse() throws IOException {
+ testActualMissingValue(true);
+ }
+
+ public void testActualMissingValue(boolean reverse) throws IOException {
+ // missing value is set to an actual value
+ Document d = new Document();
+ final StringField s = new StringField("value", "", Field.Store.YES);
+ d.add(s);
+ final String[] values = new String[randomIntBetween(2, 30)];
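+        // values[0] is deliberately left null: picking it below simulates a
+        // document that has no value for the field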
+ for (int i = 1; i < values.length; ++i) {
+ values[i] = _TestUtil.randomUnicodeString(getRandom());
+ }
+ final int numDocs = atLeast(100);
+ for (int i = 0; i < numDocs; ++i) {
+ final String value = RandomPicks.randomFrom(getRandom(), values);
+ if (value == null) {
+ writer.addDocument(new Document());
+ } else {
+ s.setStringValue(value);
+ writer.addDocument(d);
+ }
+ if (randomInt(10) == 0) {
+ writer.commit();
+ }
+ }
+
+        final IndexFieldData<?> indexFieldData = getForField("value");
+ final String missingValue = values[1];
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ XFieldComparatorSource comparator = indexFieldData.comparatorSource(missingValue, SortMode.MIN);
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(new SortField("value", comparator, reverse)));
+ assertEquals(numDocs, topDocs.totalHits);
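+        // start from the largest possible term when sorting descending, from the
+        // smallest (empty) term when sorting ascending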
+ BytesRef previousValue = reverse ? UnicodeUtil.BIG_TERM : new BytesRef();
+ for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
+ final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value");
+ final BytesRef value = new BytesRef(docValue == null ? missingValue : docValue);
+ if (reverse) {
+ assertTrue(previousValue.compareTo(value) >= 0);
+ } else {
+ assertTrue(previousValue.compareTo(value) <= 0);
+ }
+ previousValue = value;
+ }
+ searcher.getIndexReader().close();
+ }
+
+ @Repeat(iterations=3)
+ public void testSortMissingFirst() throws IOException {
+ testSortMissing(true, false);
+ }
+
+ @Repeat(iterations=3)
+ public void testSortMissingFirstReverse() throws IOException {
+ testSortMissing(true, true);
+ }
+
+ @Repeat(iterations=3)
+ public void testSortMissingLast() throws IOException {
+ testSortMissing(false, false);
+ }
+
+ @Repeat(iterations=3)
+ public void testSortMissingLastReverse() throws IOException {
+ testSortMissing(false, true);
+ }
+
+ public void testSortMissing(boolean first, boolean reverse) throws IOException {
+ Document d = new Document();
+ final StringField s = new StringField("value", "", Field.Store.YES);
+ d.add(s);
+ final String[] values = new String[randomIntBetween(2, 10)];
+ for (int i = 1; i < values.length; ++i) {
+ values[i] = _TestUtil.randomUnicodeString(getRandom());
+ }
+ final int numDocs = atLeast(100);
+ for (int i = 0; i < numDocs; ++i) {
+ final String value = RandomPicks.randomFrom(getRandom(), values);
+ if (value == null) {
+ writer.addDocument(new Document());
+ } else {
+ s.setStringValue(value);
+ writer.addDocument(d);
+ }
+ if (randomInt(10) == 0) {
+ writer.commit();
+ }
+ }
+        final IndexFieldData<?> indexFieldData = getForField("value");
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ XFieldComparatorSource comparator = indexFieldData.comparatorSource(first ? "_first" : "_last", SortMode.MIN);
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(new SortField("value", comparator, reverse)));
+ assertEquals(numDocs, topDocs.totalHits);
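+        // a null sentinel means we are still inside the missing-first prefix;
+        // otherwise start from the largest or smallest possible term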
+ BytesRef previousValue = first ? null : reverse ? UnicodeUtil.BIG_TERM : new BytesRef();
+ for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
+ final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value");
+ if (first && docValue == null) {
+ assertNull(previousValue);
+ } else if (!first && docValue != null) {
+ assertNotNull(previousValue);
+ }
+ final BytesRef value = docValue == null ? null : new BytesRef(docValue);
+ if (previousValue != null && value != null) {
+ if (reverse) {
+ assertTrue(previousValue.compareTo(value) >= 0);
+ } else {
+ assertTrue(previousValue.compareTo(value) <= 0);
+ }
+ }
+ previousValue = value;
+ }
+ searcher.getIndexReader().close();
+ }
+
+ @Repeat(iterations=3)
+ public void testNestedSortingMin() throws IOException {
+ testNestedSorting(SortMode.MIN);
+ }
+
+ @Repeat(iterations=3)
+ public void testNestedSortingMax() throws IOException {
+ testNestedSorting(SortMode.MAX);
+ }
+
+ public void testNestedSorting(SortMode sortMode) throws IOException {
+ final String[] values = new String[randomIntBetween(2, 20)];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = _TestUtil.randomSimpleString(getRandom());
+ }
+ final int numParents = atLeast(100);
+ List<Document> docs = new ArrayList<Document>();
+ final OpenBitSet parents = new OpenBitSet();
+ for (int i = 0; i < numParents; ++i) {
+ docs.clear();
+ final int numChildren = randomInt(4);
+ for (int j = 0; j < numChildren; ++j) {
+ final Document child = new Document();
+ final int numValues = randomInt(3);
+ for (int k = 0; k < numValues; ++k) {
+ final String value = RandomPicks.randomFrom(getRandom(), values);
+ child.add(new StringField("text", value, Store.YES));
+ }
+ docs.add(child);
+ }
+ final Document parent = new Document();
+ parent.add(new StringField("type", "parent", Store.YES));
+ final String value = RandomPicks.randomFrom(getRandom(), values);
+ if (value != null) {
+ parent.add(new StringField("text", value, Store.YES));
+ }
+ docs.add(parent);
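+            // the parent is the last document of its block, so its doc id is the
+            // previous parent's id plus the size of this block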
+ parents.set(parents.prevSetBit(parents.length() - 1) + docs.size());
+ writer.addDocuments(docs);
+ if (randomInt(10) == 0) {
+ writer.commit();
+ }
+ }
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ IndexFieldData<?> fieldData = getForField("text");
+ final BytesRef missingValue;
+ switch (randomInt(4)) {
+ case 0:
+ missingValue = new BytesRef();
+ break;
+ case 1:
+ missingValue = BytesRefFieldComparatorSource.MAX_TERM;
+ break;
+ case 2:
+ missingValue = new BytesRef(RandomPicks.randomFrom(getRandom(), values));
+ break;
+ default:
+ missingValue = new BytesRef(_TestUtil.randomSimpleString(getRandom()));
+ break;
+ }
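+        // sort parents by the min/max of their children's values; the block-join
+        // query matches every parent whose block contains at least one child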
+ BytesRefFieldComparatorSource innerSource = new BytesRefFieldComparatorSource(fieldData, missingValue, sortMode);
+ Filter parentFilter = new TermFilter(new Term("type", "parent"));
+ Filter childFilter = new NotFilter(parentFilter);
+ NestedFieldComparatorSource nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerSource, parentFilter, childFilter);
+ ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new XFilteredQuery(new MatchAllDocsQuery(), childFilter), new FixedBitSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+ Sort sort = new Sort(new SortField("text", nestedComparatorSource));
+ TopFieldDocs topDocs = searcher.search(query, randomIntBetween(1, numParents), sort);
+ assertTrue(topDocs.scoreDocs.length > 0);
+ BytesRef previous = null;
+ for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
+ final int docID = topDocs.scoreDocs[i].doc;
+ assertTrue("expected " + docID + " to be a parent", parents.get(docID));
+ BytesRef cmpValue = null;
+ for (int child = parents.prevSetBit(docID - 1) + 1; child < docID; ++child) {
+ String[] vals = searcher.doc(child).getValues("text");
+ if (vals.length == 0) {
+ vals = new String[] {missingValue.utf8ToString()};
+ }
+ for (String value : vals) {
+ final BytesRef bytesValue = new BytesRef(value);
+ if (cmpValue == null) {
+ cmpValue = bytesValue;
+ } else if (sortMode == SortMode.MIN && bytesValue.compareTo(cmpValue) < 0) {
+ cmpValue = bytesValue;
+ } else if (sortMode == SortMode.MAX && bytesValue.compareTo(cmpValue) > 0) {
+ cmpValue = bytesValue;
+ }
+ }
+ }
+ if (cmpValue == null) {
+ cmpValue = missingValue;
+ }
+ if (previous != null) {
+ assertNotNull(cmpValue);
+ assertTrue(previous.utf8ToString() + " / " + cmpValue.utf8ToString(), previous.compareTo(cmpValue) <= 0);
+ }
+ previous = cmpValue;
+ }
+ searcher.getIndexReader().close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/DisabledFieldDataFormatTests.java b/src/test/java/org/elasticsearch/index/fielddata/DisabledFieldDataFormatTests.java
new file mode 100644
index 0000000..2a205f4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/DisabledFieldDataFormatTests.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+public class DisabledFieldDataFormatTests extends ElasticsearchIntegrationTest {
+
+ public void test() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ for (int i = 0; i < 10; ++i) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("s", "value" + i).execute().actionGet();
+ }
+
+ refresh();
+
+ // disable field data
+ updateFormat("disabled");
+
+ SearchResponse resp = null;
+ // try to run something that relies on field data and make sure that it fails
+ try {
+ resp = client().prepareSearch("test").addAggregation(AggregationBuilders.terms("t").field("s")).execute().actionGet();
+ assertTrue(resp.toString(), resp.getFailedShards() > 0);
+ } catch (SearchPhaseExecutionException e) {
+ // expected
+ }
+
+ // enable it again
+ updateFormat("paged_bytes");
+
+ // try to run something that relies on field data and make sure that it works
+ resp = client().prepareSearch("test").addAggregation(AggregationBuilders.terms("t").field("s")).execute().actionGet();
+ assertNoFailures(resp);
+
+ // disable it again
+ updateFormat("disabled");
+
+ // this time, it should work because segments are already loaded
+ resp = client().prepareSearch("test").addAggregation(AggregationBuilders.terms("t").field("s")).execute().actionGet();
+ assertNoFailures(resp);
+
+ // but add more docs and the new segment won't be loaded
+ client().prepareIndex("test", "type", "-1").setSource("s", "value").execute().actionGet();
+ refresh();
+ try {
+ resp = client().prepareSearch("test").addAggregation(AggregationBuilders.terms("t").field("s")).execute().actionGet();
+ assertTrue(resp.toString(), resp.getFailedShards() > 0);
+ } catch (SearchPhaseExecutionException e) {
+ // expected
+ }
+ }
+
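+    // For reference, the builder below sends a mapping update roughly equivalent
+    // to this JSON (a sketch; only the fielddata format changes between calls):
+    // { "type": { "properties": { "s": {
+    //     "type": "string", "fielddata": { "format": "<format>" } } } } }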
+ private void updateFormat(String format) throws Exception {
+ client().admin().indices().preparePutMapping("test").setType("type").setSource(
+ XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("s")
+ .field("type", "string")
+ .startObject("fielddata")
+ .field("format", format)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/DoubleFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/DoubleFieldDataTests.java
new file mode 100644
index 0000000..77a5bb2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/DoubleFieldDataTests.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.Term;
+
+/**
+ */
+public class DoubleFieldDataTests extends AbstractNumericFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("double");
+ }
+
+ protected String one() {
+ return "1.0";
+ }
+
+ protected String two() {
+ return "2.0";
+ }
+
+ protected String three() {
+ return "3.0";
+ }
+
+ protected String four() {
+ return "4.0";
+ }
+
+ protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new DoubleField("value", 4.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ writer.commit();
+
+ writer.deleteDocuments(new Term("_id", "1"));
+ }
+
+ @Override
+ protected void fillSingleValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new DoubleField("value", 1.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new DoubleField("value", 3.0d, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillSingleValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING....
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new DoubleField("value", 3.0d, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+ d.add(new DoubleField("value", 4.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new DoubleField("value", 1.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new DoubleField("value", 3.0d, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+ d.add(new DoubleField("value", 4.0d, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+        d.add(new DoubleField("value", 3.0d, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void fillExtendedMvSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new DoubleField("value", 2, Field.Store.NO));
+ d.add(new DoubleField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new DoubleField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "4", Field.Store.NO));
+ d.add(new DoubleField("value", 4, Field.Store.NO));
+ d.add(new DoubleField("value", 5, Field.Store.NO));
+ d.add(new DoubleField("value", 6, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "5", Field.Store.NO));
+ d.add(new DoubleField("value", 6, Field.Store.NO));
+ d.add(new DoubleField("value", 7, Field.Store.NO));
+ d.add(new DoubleField("value", 8, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "6", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "7", Field.Store.NO));
+ d.add(new DoubleField("value", 8, Field.Store.NO));
+ d.add(new DoubleField("value", 9, Field.Store.NO));
+ d.add(new DoubleField("value", 10, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
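+        // negative values play the same role as the "!"-prefixed strings in the
+        // string variant: they sort before every other value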
+ d = new Document();
+ d.add(new StringField("_id", "8", Field.Store.NO));
+ d.add(new DoubleField("value", -8, Field.Store.NO));
+ d.add(new DoubleField("value", -9, Field.Store.NO));
+ d.add(new DoubleField("value", -10, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java
new file mode 100644
index 0000000..80e78fe
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java
@@ -0,0 +1,625 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.CompositeReaderContext;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.DistanceUnit.Distance;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.junit.Test;
+
+import java.util.*;
+import java.util.Map.Entry;
+
+import static org.hamcrest.Matchers.*;
+
+public class DuelFieldDataTests extends AbstractFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return null;
+ }
+
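+    // local variant of LuceneTestCase#atLeast: draws a value from [i, i + i/2)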
+ public static int atLeast(Random random, int i) {
+ int min = i;
+ int max = min + (min / 2);
+ return min + random.nextInt(max - min);
+ }
+
+ @Test
+ public void testDuelAllTypesSingleValue() throws Exception {
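+        // every field declares an explicit fielddata format so the duel can load
+        // the same values through different implementations; strings fall back to
+        // "fst" when the codec does not support sorted-set doc values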
+ final String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("bytes").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", LuceneTestCase.defaultCodecSupportsSortedSet() ? "doc_values" : "fst").endObject().endObject()
+ .startObject("byte").field("type", "byte").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("short").field("type", "short").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("integer").field("type", "integer").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("long").field("type", "long").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("float").field("type", "float").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("double").field("type", "double").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .endObject().endObject().endObject().string();
+ final DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+ Random random = getRandom();
+ int atLeast = atLeast(random, 1000);
+ for (int i = 0; i < atLeast; i++) {
+ String s = Integer.toString(randomByte());
+
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject();
+ for (String fieldName : Arrays.asList("bytes", "byte", "short", "integer", "long", "float", "double")) {
+ doc = doc.field(fieldName, s);
+ }
+
+ doc = doc.endObject();
+
+ final ParsedDocument d = mapper.parse("type", Integer.toString(i), doc.bytes());
+
+ writer.addDocument(d.rootDoc());
+
+ if (random.nextInt(10) == 0) {
+ refreshReader();
+ }
+ }
+ AtomicReaderContext context = refreshReader();
+ Map<FieldDataType, Type> typeMap = new HashMap<FieldDataType, DuelFieldDataTests.Type>();
+ typeMap.put(new FieldDataType("string", ImmutableSettings.builder().put("format", "fst")), Type.Bytes);
+ typeMap.put(new FieldDataType("string", ImmutableSettings.builder().put("format", "paged_bytes")), Type.Bytes);
+ typeMap.put(new FieldDataType("byte", ImmutableSettings.builder().put("format", "array")), Type.Integer);
+ typeMap.put(new FieldDataType("short", ImmutableSettings.builder().put("format", "array")), Type.Integer);
+ typeMap.put(new FieldDataType("int", ImmutableSettings.builder().put("format", "array")), Type.Integer);
+ typeMap.put(new FieldDataType("long", ImmutableSettings.builder().put("format", "array")), Type.Long);
+ typeMap.put(new FieldDataType("double", ImmutableSettings.builder().put("format", "array")), Type.Double);
+ typeMap.put(new FieldDataType("float", ImmutableSettings.builder().put("format", "array")), Type.Float);
+ typeMap.put(new FieldDataType("byte", ImmutableSettings.builder().put("format", "doc_values")), Type.Integer);
+ typeMap.put(new FieldDataType("short", ImmutableSettings.builder().put("format", "doc_values")), Type.Integer);
+ typeMap.put(new FieldDataType("int", ImmutableSettings.builder().put("format", "doc_values")), Type.Integer);
+ typeMap.put(new FieldDataType("long", ImmutableSettings.builder().put("format", "doc_values")), Type.Long);
+ typeMap.put(new FieldDataType("double", ImmutableSettings.builder().put("format", "doc_values")), Type.Double);
+ typeMap.put(new FieldDataType("float", ImmutableSettings.builder().put("format", "doc_values")), Type.Float);
+ if (LuceneTestCase.defaultCodecSupportsSortedSet()) {
+ typeMap.put(new FieldDataType("string", ImmutableSettings.builder().put("format", "doc_values")), Type.Bytes);
+ }
+ ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<Entry<FieldDataType, Type>>(typeMap.entrySet());
+ Preprocessor pre = new ToDoublePreprocessor();
+ while (!list.isEmpty()) {
+ Entry<FieldDataType, Type> left;
+ Entry<FieldDataType, Type> right;
+ if (list.size() > 1) {
+ left = list.remove(random.nextInt(list.size()));
+ right = list.remove(random.nextInt(list.size()));
+ } else {
+ right = left = list.remove(0);
+ }
+
+ ifdService.clear();
+ IndexFieldData<?> leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+ ifdService.clear();
+ IndexFieldData<?> rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+ duelFieldDataBytes(random, context, leftFieldData, rightFieldData, pre);
+ duelFieldDataBytes(random, context, rightFieldData, leftFieldData, pre);
+
+ DirectoryReader perSegment = DirectoryReader.open(writer, true);
+ CompositeReaderContext composite = perSegment.getContext();
+ List<AtomicReaderContext> leaves = composite.leaves();
+ for (AtomicReaderContext atomicReaderContext : leaves) {
+ duelFieldDataBytes(random, atomicReaderContext, leftFieldData, rightFieldData, pre);
+ }
+ }
+ }
+
+
+ @Test
+ public void testDuelIntegers() throws Exception {
+ final String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("byte").field("type", "byte").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("short").field("type", "short").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("integer").field("type", "integer").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("long").field("type", "long").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ final DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+ Random random = getRandom();
+ int atLeast = atLeast(random, 1000);
+ final int maxNumValues = randomBoolean() ? 1 : randomIntBetween(2, 40);
+ byte[] values = new byte[maxNumValues];
+ for (int i = 0; i < atLeast; i++) {
+ final int numValues = randomInt(maxNumValues);
+ for (int j = 0; j < numValues; ++j) {
+ if (randomBoolean()) {
+ values[j] = 1; // test deduplication
+ } else {
+ values[j] = randomByte();
+ }
+ }
+
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject();
+ for (String fieldName : Arrays.asList("byte", "short", "integer", "long")) {
+ doc = doc.startArray(fieldName);
+ for (int j = 0; j < numValues; ++j) {
+ doc = doc.value(values[j]);
+ }
+ doc = doc.endArray();
+ }
+ doc = doc.endObject();
+
+ final ParsedDocument d = mapper.parse("type", Integer.toString(i), doc.bytes());
+
+ writer.addDocument(d.rootDoc());
+ if (random.nextInt(10) == 0) {
+ refreshReader();
+ }
+ }
+ AtomicReaderContext context = refreshReader();
+ Map<FieldDataType, Type> typeMap = new HashMap<FieldDataType, Type>();
+ typeMap.put(new FieldDataType("byte", ImmutableSettings.builder().put("format", "array")), Type.Integer);
+ typeMap.put(new FieldDataType("short", ImmutableSettings.builder().put("format", "array")), Type.Integer);
+ typeMap.put(new FieldDataType("int", ImmutableSettings.builder().put("format", "array")), Type.Integer);
+ typeMap.put(new FieldDataType("long", ImmutableSettings.builder().put("format", "array")), Type.Long);
+ typeMap.put(new FieldDataType("byte", ImmutableSettings.builder().put("format", "doc_values")), Type.Integer);
+ typeMap.put(new FieldDataType("short", ImmutableSettings.builder().put("format", "doc_values")), Type.Integer);
+ typeMap.put(new FieldDataType("int", ImmutableSettings.builder().put("format", "doc_values")), Type.Integer);
+ typeMap.put(new FieldDataType("long", ImmutableSettings.builder().put("format", "doc_values")), Type.Long);
+ ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<Entry<FieldDataType, Type>>(typeMap.entrySet());
+ while (!list.isEmpty()) {
+ Entry<FieldDataType, Type> left;
+ Entry<FieldDataType, Type> right;
+ if (list.size() > 1) {
+ left = list.remove(random.nextInt(list.size()));
+ right = list.remove(random.nextInt(list.size()));
+ } else {
+ right = left = list.remove(0);
+ }
+ ifdService.clear();
+ IndexNumericFieldData<?> leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+ ifdService.clear();
+ IndexNumericFieldData<?> rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+
+ duelFieldDataLong(random, context, leftFieldData, rightFieldData);
+ duelFieldDataLong(random, context, rightFieldData, leftFieldData);
+
+ DirectoryReader perSegment = DirectoryReader.open(writer, true);
+ CompositeReaderContext composite = perSegment.getContext();
+ List<AtomicReaderContext> leaves = composite.leaves();
+ for (AtomicReaderContext atomicReaderContext : leaves) {
+ duelFieldDataLong(random, atomicReaderContext, leftFieldData, rightFieldData);
+ }
+ }
+
+ }
+
+ @Test
+ public void testDuelDoubles() throws Exception {
+ final String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("float").field("type", "float").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("double").field("type", "double").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ final DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+ Random random = getRandom();
+ int atLeast = atLeast(random, 1000);
+ final int maxNumValues = randomBoolean() ? 1 : randomIntBetween(2, 40);
+ float[] values = new float[maxNumValues];
+ for (int i = 0; i < atLeast; i++) {
+ final int numValues = randomInt(maxNumValues);
+ float def = randomBoolean() ? randomFloat() : Float.NaN;
+ for (int j = 0; j < numValues; ++j) {
+ if (randomBoolean()) {
+ values[j] = def;
+ } else {
+ values[j] = randomFloat();
+ }
+ }
+
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().startArray("float");
+ for (int j = 0; j < numValues; ++j) {
+ doc = doc.value(values[j]);
+ }
+ doc = doc.endArray().startArray("double");
+ for (int j = 0; j < numValues; ++j) {
+ doc = doc.value(values[j]);
+ }
+ doc = doc.endArray().endObject();
+
+ final ParsedDocument d = mapper.parse("type", Integer.toString(i), doc.bytes());
+
+ writer.addDocument(d.rootDoc());
+ if (random.nextInt(10) == 0) {
+ refreshReader();
+ }
+ }
+ AtomicReaderContext context = refreshReader();
+ Map<FieldDataType, Type> typeMap = new HashMap<FieldDataType, Type>();
+ typeMap.put(new FieldDataType("double", ImmutableSettings.builder().put("format", "array")), Type.Double);
+ typeMap.put(new FieldDataType("float", ImmutableSettings.builder().put("format", "array")), Type.Float);
+ typeMap.put(new FieldDataType("double", ImmutableSettings.builder().put("format", "doc_values")), Type.Double);
+ typeMap.put(new FieldDataType("float", ImmutableSettings.builder().put("format", "doc_values")), Type.Float);
+ ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<Entry<FieldDataType, Type>>(typeMap.entrySet());
+ while (!list.isEmpty()) {
+ Entry<FieldDataType, Type> left;
+ Entry<FieldDataType, Type> right;
+ if (list.size() > 1) {
+ left = list.remove(random.nextInt(list.size()));
+ right = list.remove(random.nextInt(list.size()));
+ } else {
+ right = left = list.remove(0);
+ }
+ ifdService.clear();
+ IndexNumericFieldData<?> leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+
+ ifdService.clear();
+ IndexNumericFieldData<?> rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+
+ assertOrder(left.getValue().order(), leftFieldData, context);
+ assertOrder(right.getValue().order(), rightFieldData, context);
+ duelFieldDataDouble(random, context, leftFieldData, rightFieldData);
+ duelFieldDataDouble(random, context, rightFieldData, leftFieldData);
+
+ DirectoryReader perSegment = DirectoryReader.open(writer, true);
+ CompositeReaderContext composite = perSegment.getContext();
+ List<AtomicReaderContext> leaves = composite.leaves();
+ for (AtomicReaderContext atomicReaderContext : leaves) {
+ duelFieldDataDouble(random, atomicReaderContext, leftFieldData, rightFieldData);
+ }
+ }
+
+ }
+
+
+ @Test
+ public void testDuelStrings() throws Exception {
+ Random random = getRandom();
+ int atLeast = atLeast(random, 1000);
+ for (int i = 0; i < atLeast; i++) {
+ Document d = new Document();
+ d.add(new StringField("_id", "" + i, Field.Store.NO));
+ if (random.nextInt(15) != 0) {
+ int[] numbers = getNumbers(random, Integer.MAX_VALUE);
+ for (int j : numbers) {
+ final String s = English.longToEnglish(j);
+ d.add(new StringField("bytes", s, Field.Store.NO));
+ if (LuceneTestCase.defaultCodecSupportsSortedSet()) {
+ d.add(new SortedSetDocValuesField("bytes", new BytesRef(s)));
+ }
+ }
+ if (random.nextInt(10) == 0) {
+ d.add(new StringField("bytes", "", Field.Store.NO));
+ if (LuceneTestCase.defaultCodecSupportsSortedSet()) {
+ d.add(new SortedSetDocValuesField("bytes", new BytesRef()));
+ }
+ }
+ }
+ writer.addDocument(d);
+ if (random.nextInt(10) == 0) {
+ refreshReader();
+ }
+ }
+ AtomicReaderContext context = refreshReader();
+ Map<FieldDataType, Type> typeMap = new HashMap<FieldDataType, DuelFieldDataTests.Type>();
+ typeMap.put(new FieldDataType("string", ImmutableSettings.builder().put("format", "fst")), Type.Bytes);
+ typeMap.put(new FieldDataType("string", ImmutableSettings.builder().put("format", "paged_bytes")), Type.Bytes);
+ if (LuceneTestCase.defaultCodecSupportsSortedSet()) {
+ typeMap.put(new FieldDataType("string", ImmutableSettings.builder().put("format", "doc_values")), Type.Bytes);
+ }
+ // TODO add filters
+ ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<Entry<FieldDataType, Type>>(typeMap.entrySet());
+ Preprocessor pre = new Preprocessor();
+ while (!list.isEmpty()) {
+ Entry<FieldDataType, Type> left;
+ Entry<FieldDataType, Type> right;
+ if (list.size() > 1) {
+ left = list.remove(random.nextInt(list.size()));
+ right = list.remove(random.nextInt(list.size()));
+ } else {
+ right = left = list.remove(0);
+ }
+ ifdService.clear();
+ IndexFieldData<?> leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+
+ ifdService.clear();
+ IndexFieldData<?> rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+
+ duelFieldDataBytes(random, context, leftFieldData, rightFieldData, pre);
+ duelFieldDataBytes(random, context, rightFieldData, leftFieldData, pre);
+
+ DirectoryReader perSegment = DirectoryReader.open(writer, true);
+ CompositeReaderContext composite = perSegment.getContext();
+ List<AtomicReaderContext> leaves = composite.leaves();
+ for (AtomicReaderContext atomicReaderContext : leaves) {
+ assertOrder(AtomicFieldData.Order.BYTES, leftFieldData, atomicReaderContext);
+ assertOrder(AtomicFieldData.Order.BYTES, rightFieldData, atomicReaderContext);
+ duelFieldDataBytes(random, atomicReaderContext, leftFieldData, rightFieldData, pre);
+ }
+ perSegment.close();
+ }
+
+ }
+
+    @Test
+    public void testDuelGeoPoints() throws Exception {
+ final String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("geopoint").field("type", "geo_point").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ final DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ Random random = getRandom();
+ int atLeast = atLeast(random, 1000);
+ int maxValuesPerDoc = randomBoolean() ? 1 : randomIntBetween(2, 40);
+ // to test deduplication
+ double defaultLat = randomDouble() * 180 - 90;
+ double defaultLon = randomDouble() * 360 - 180;
+ for (int i = 0; i < atLeast; i++) {
+ final int numValues = randomInt(maxValuesPerDoc);
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().startArray("geopoint");
+ for (int j = 0; j < numValues; ++j) {
+ if (randomBoolean()) {
+ doc.startObject().field("lat", defaultLat).field("lon", defaultLon).endObject();
+ } else {
+ doc.startObject().field("lat", randomDouble() * 180 - 90).field("lon", randomDouble() * 360 - 180).endObject();
+ }
+ }
+ doc = doc.endArray().endObject();
+ final ParsedDocument d = mapper.parse("type", Integer.toString(i), doc.bytes());
+
+ writer.addDocument(d.rootDoc());
+ if (random.nextInt(10) == 0) {
+ refreshReader();
+ }
+ }
+ AtomicReaderContext context = refreshReader();
+ Map<FieldDataType, Type> typeMap = new HashMap<FieldDataType, DuelFieldDataTests.Type>();
+ final Distance precision = new Distance(1, randomFrom(DistanceUnit.values()));
+ typeMap.put(new FieldDataType("geo_point", ImmutableSettings.builder().put("format", "array")), Type.GeoPoint);
+ typeMap.put(new FieldDataType("geo_point", ImmutableSettings.builder().put("format", "compressed").put("precision", precision)), Type.GeoPoint);
+ typeMap.put(new FieldDataType("geo_point", ImmutableSettings.builder().put("format", "doc_values")), Type.GeoPoint);
+
+ ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<Entry<FieldDataType, Type>>(typeMap.entrySet());
+ while (!list.isEmpty()) {
+ Entry<FieldDataType, Type> left;
+ Entry<FieldDataType, Type> right;
+ if (list.size() > 1) {
+ left = list.remove(random.nextInt(list.size()));
+ right = list.remove(random.nextInt(list.size()));
+ } else {
+ right = left = list.remove(0);
+ }
+ ifdService.clear();
+ IndexGeoPointFieldData<?> leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+
+ ifdService.clear();
+ IndexGeoPointFieldData<?> rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+
+ duelFieldDataGeoPoint(random, context, leftFieldData, rightFieldData, precision);
+ duelFieldDataGeoPoint(random, context, rightFieldData, leftFieldData, precision);
+
+ DirectoryReader perSegment = DirectoryReader.open(writer, true);
+ CompositeReaderContext composite = perSegment.getContext();
+ List<AtomicReaderContext> leaves = composite.leaves();
+ for (AtomicReaderContext atomicReaderContext : leaves) {
+ duelFieldDataGeoPoint(random, atomicReaderContext, leftFieldData, rightFieldData, precision);
+ }
+ perSegment.close();
+ }
+ }
+
+ private void assertOrder(AtomicFieldData.Order order, IndexFieldData<?> data, AtomicReaderContext context) throws Exception {
+        AtomicFieldData<?> fieldData = randomBoolean() ? data.load(context) : data.loadDirect(context);
+        assertThat(fieldData.getBytesValues(randomBoolean()).getOrder(), is(order));
+ }
+
+ private int[] getNumbers(Random random, int margin) {
+ if (random.nextInt(20) == 0) {
+ int[] num = new int[1 + random.nextInt(10)];
+ for (int i = 0; i < num.length; i++) {
+ int v = (random.nextBoolean() ? -1 * random.nextInt(margin) : random.nextInt(margin));
+ num[i] = v;
+ }
+ return num;
+ }
+ return new int[]{(random.nextBoolean() ? -1 * random.nextInt(margin) : random.nextInt(margin))};
+ }
+
+
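+    // walks both field data views document by document and asserts they expose
+    // the same values, the same hashes and, when comparable, the same ordering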
+ private static void duelFieldDataBytes(Random random, AtomicReaderContext context, IndexFieldData<?> left, IndexFieldData<?> right, Preprocessor pre) throws Exception {
+ AtomicFieldData<?> leftData = random.nextBoolean() ? left.load(context) : left.loadDirect(context);
+ AtomicFieldData<?> rightData = random.nextBoolean() ? right.load(context) : right.loadDirect(context);
+ assertThat(leftData.getNumDocs(), equalTo(rightData.getNumDocs()));
+
+ int numDocs = leftData.getNumDocs();
+ BytesValues leftBytesValues = leftData.getBytesValues(random.nextBoolean());
+ BytesValues rightBytesValues = rightData.getBytesValues(random.nextBoolean());
+ BytesRef leftSpare = new BytesRef();
+ BytesRef rightSpare = new BytesRef();
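+        // nextValue() may return a shared, reused instance, so each value is
+        // copied into a spare before hashing and comparing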
+
+ for (int i = 0; i < numDocs; i++) {
+ int numValues = 0;
+ assertThat((numValues = leftBytesValues.setDocument(i)), equalTo(rightBytesValues.setDocument(i)));
+ BytesRef previous = null;
+ for (int j = 0; j < numValues; j++) {
+
+ rightSpare.copyBytes(rightBytesValues.nextValue());
+ leftSpare.copyBytes(leftBytesValues.nextValue());
+ assertThat(rightSpare.hashCode(), equalTo(rightBytesValues.currentValueHash()));
+ assertThat(leftSpare.hashCode(), equalTo(leftBytesValues.currentValueHash()));
+                if (previous != null && leftBytesValues.getOrder() == rightBytesValues.getOrder()) { // we can only compare the ordering if both sides return values in the same order
+ assertThat(pre.compare(previous, rightSpare), lessThan(0));
+ }
+ previous = BytesRef.deepCopyOf(rightSpare);
+ pre.toString(rightSpare);
+ pre.toString(leftSpare);
+ assertThat(pre.toString(leftSpare), equalTo(pre.toString(rightSpare)));
+ if (leftSpare.equals(rightSpare)) {
+ assertThat(leftBytesValues.currentValueHash(), equalTo(rightBytesValues.currentValueHash()));
+ }
+ }
+ }
+ }
+
+
+ private static void duelFieldDataDouble(Random random, AtomicReaderContext context, IndexNumericFieldData<?> left, IndexNumericFieldData<?> right) throws Exception {
+ AtomicNumericFieldData leftData = random.nextBoolean() ? left.load(context) : left.loadDirect(context);
+ AtomicNumericFieldData rightData = random.nextBoolean() ? right.load(context) : right.loadDirect(context);
+
+ assertThat(leftData.getNumDocs(), equalTo(rightData.getNumDocs()));
+
+ int numDocs = leftData.getNumDocs();
+ DoubleValues leftDoubleValues = leftData.getDoubleValues();
+ DoubleValues rightDoubleValues = rightData.getDoubleValues();
+ for (int i = 0; i < numDocs; i++) {
+ int numValues = 0;
+ assertThat((numValues = leftDoubleValues.setDocument(i)), equalTo(rightDoubleValues.setDocument(i)));
+ double previous = 0;
+ for (int j = 0; j < numValues; j++) {
+ double current = rightDoubleValues.nextValue();
+ if (Double.isNaN(current)) {
+ assertTrue(Double.isNaN(leftDoubleValues.nextValue()));
+ } else {
+ assertThat(leftDoubleValues.nextValue(), closeTo(current, 0.0001));
+ }
+ if (j > 0) {
+                    assertThat(Double.compare(previous, current), lessThan(0));
+ }
+ previous = current;
+ }
+ }
+ }
+
+    private static void duelFieldDataLong(Random random, AtomicReaderContext context, IndexNumericFieldData<?> left, IndexNumericFieldData<?> right) throws Exception {
+ AtomicNumericFieldData leftData = random.nextBoolean() ? left.load(context) : left.loadDirect(context);
+ AtomicNumericFieldData rightData = random.nextBoolean() ? right.load(context) : right.loadDirect(context);
+
+ assertThat(leftData.getNumDocs(), equalTo(rightData.getNumDocs()));
+
+ int numDocs = leftData.getNumDocs();
+ LongValues leftLongValues = leftData.getLongValues();
+ LongValues rightLongValues = rightData.getLongValues();
+ for (int i = 0; i < numDocs; i++) {
+ int numValues = 0;
+ long previous = 0;
+ assertThat((numValues = leftLongValues.setDocument(i)), equalTo(rightLongValues.setDocument(i)));
+ for (int j = 0; j < numValues; j++) {
+ long current;
+ assertThat(leftLongValues.nextValue(), equalTo(current = rightLongValues.nextValue()));
+ if (j > 0) {
+ assertThat(previous, lessThan(current));
+ }
+ previous = current;
+ }
+ }
+ }
+
+ private static void duelFieldDataGeoPoint(Random random, AtomicReaderContext context, IndexGeoPointFieldData<?> left, IndexGeoPointFieldData<?> right, Distance precision) throws Exception {
+ AtomicGeoPointFieldData<?> leftData = random.nextBoolean() ? left.load(context) : left.loadDirect(context);
+ AtomicGeoPointFieldData<?> rightData = random.nextBoolean() ? right.load(context) : right.loadDirect(context);
+
+ assertThat(leftData.getNumDocs(), equalTo(rightData.getNumDocs()));
+
+ int numDocs = leftData.getNumDocs();
+ GeoPointValues leftValues = leftData.getGeoPointValues();
+ GeoPointValues rightValues = rightData.getGeoPointValues();
+ for (int i = 0; i < numDocs; ++i) {
+ final int numValues = leftValues.setDocument(i);
+ assertEquals(numValues, rightValues.setDocument(i));
+ List<GeoPoint> leftPoints = Lists.newArrayList();
+ List<GeoPoint> rightPoints = Lists.newArrayList();
+ for (int j = 0; j < numValues; ++j) {
+ GeoPoint l = leftValues.nextValue();
+ leftPoints.add(new GeoPoint(l.getLat(), l.getLon()));
+ GeoPoint r = rightValues.nextValue();
+ rightPoints.add(new GeoPoint(r.getLat(), r.getLon()));
+ }
+ for (GeoPoint l : leftPoints) {
+ assertTrue("Couldn't find " + l + " among " + rightPoints, contains(l, rightPoints, precision));
+ }
+ for (GeoPoint r : rightPoints) {
+ assertTrue("Couldn't find " + r + " among " + leftPoints, contains(r, leftPoints, precision));
+ }
+ }
+ }
+
+ private static boolean contains(GeoPoint point, List<GeoPoint> set, Distance precision) {
+ for (GeoPoint r : set) {
+ final double distance = GeoDistance.PLANE.calculate(point.getLat(), point.getLon(), r.getLat(), r.getLon(), DistanceUnit.METERS);
+ if (new Distance(distance, DistanceUnit.METERS).compareTo(precision) <= 0) {
+ return true;
+ }
+ }
+ return false;
+ }
+
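+    // the base preprocessor compares raw bytes; ToDoublePreprocessor normalizes
+    // numeric renderings (e.g. "2" vs "2.0") before comparison so that different
+    // field data implementations of the same number duel cleanly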
+ private static class Preprocessor {
+
+ public String toString(BytesRef ref) {
+ return ref.utf8ToString();
+ }
+
+ public int compare(BytesRef a, BytesRef b) {
+ return a.compareTo(b);
+ }
+ }
+
+ private static class ToDoublePreprocessor extends Preprocessor {
+
+ @Override
+ public String toString(BytesRef ref) {
+ assertTrue(ref.length > 0);
+ return Double.toString(Double.parseDouble(super.toString(ref)));
+ }
+
+ @Override
+ public int compare(BytesRef a, BytesRef b) {
+ Double _a = Double.parseDouble(super.toString(a));
+ return _a.compareTo(Double.parseDouble(super.toString(b)));
+ }
+ }
+
+
+    private static enum Type {
+        Float(AtomicFieldData.Order.NUMERIC),
+        Double(AtomicFieldData.Order.NUMERIC),
+        Integer(AtomicFieldData.Order.NUMERIC),
+        Long(AtomicFieldData.Order.NUMERIC),
+        Bytes(AtomicFieldData.Order.BYTES),
+        GeoPoint(AtomicFieldData.Order.NONE);
+
+ private final AtomicFieldData.Order order;
+ Type(AtomicFieldData.Order order) {
+ this.order = order;
+ }
+
+ public AtomicFieldData.Order order() {
+ return order;
+ }
+ }
+
+}
+
diff --git a/src/test/java/org/elasticsearch/index/fielddata/FSTPackedBytesStringFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/FSTPackedBytesStringFieldDataTests.java
new file mode 100644
index 0000000..e9047c2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/FSTPackedBytesStringFieldDataTests.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+
+/**
+ */
+public class FSTPackedBytesStringFieldDataTests extends AbstractStringFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("string", ImmutableSettings.builder().put("format", "fst"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/FieldDataFilterIntegrationTests.java b/src/test/java/org/elasticsearch/index/fielddata/FieldDataFilterIntegrationTests.java
new file mode 100644
index 0000000..fdeb988
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/FieldDataFilterIntegrationTests.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.facet.Facets;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.facet.FacetBuilders.termsFacet;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+
+public class FieldDataFilterIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testRegexpFilter() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.number_of_shards", between(1,5))
+ .put("index.number_of_replicas", 0));
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "string")
+ .startObject("fielddata")
+ .startObject("filter")
+ .startObject("regex")
+ .field("pattern", "^bac.*")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("not_filtered")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type", mapping));
+ ensureGreen();
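+        // fielddata on "name" is regex-filtered to terms starting with "bac", so
+        // only "bacon" survives; "not_filtered" keeps both analyzed terms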
+ int numDocs = atLeast(5);
+ for (int i = 0; i < numDocs; i++) {
+            client().prepareIndex("test", "type", "" + i).setSource("name", "bacon bastards", "not_filtered", "bacon bastards").get();
+ }
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("name").field("name"))
+ .addFacet(termsFacet("not_filtered").field("not_filtered")).get();
+ Facets facets = searchResponse.getFacets();
+ TermsFacet nameFacet = facets.facet("name");
+ assertThat(nameFacet.getEntries().size(), Matchers.equalTo(1));
+ assertThat(nameFacet.getEntries().get(0).getTerm().string(), Matchers.equalTo("bacon"));
+
+ TermsFacet notFilteredFacet = facets.facet("not_filtered");
+ assertThat(notFilteredFacet.getEntries().size(), Matchers.equalTo(2));
+ assertThat(notFilteredFacet.getEntries().get(0).getTerm().string(), Matchers.isOneOf("bacon", "bastards"));
+ assertThat(notFilteredFacet.getEntries().get(1).getTerm().string(), Matchers.isOneOf("bacon", "bastards"));
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTest.java b/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTest.java
new file mode 100644
index 0000000..91dc5ef
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTest.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.fielddata.AtomicFieldData.WithOrdinals;
+import org.elasticsearch.index.fielddata.ScriptDocValues.Strings;
+import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
+import org.elasticsearch.index.merge.Merges;
+import org.junit.Test;
+
+import java.util.Random;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class FilterFieldDataTest extends AbstractFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+        // unused: the tests in this class construct their own FieldDataType instances
+ return null;
+ }
+
+ @Test
+ public void testFilterByFrequency() throws Exception {
+ Random random = getRandom();
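+        // resulting document frequencies on high_freq: "5" -> 200 docs,
+        // "10" -> 100 docs, "100" -> 10 docs, so the frequency filters below cut
+        // off predictable subsets of the terms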
+ for (int i = 0; i < 1000; i++) {
+ Document d = new Document();
+ d.add(new StringField("id", "" + i, Field.Store.NO));
+ if (i % 100 == 0) {
+ d.add(new StringField("high_freq", "100", Field.Store.NO));
+ d.add(new StringField("low_freq", "100", Field.Store.NO));
+ d.add(new StringField("med_freq", "100", Field.Store.NO));
+ }
+ if (i % 10 == 0) {
+ d.add(new StringField("high_freq", "10", Field.Store.NO));
+ d.add(new StringField("med_freq", "10", Field.Store.NO));
+ }
+ if (i % 5 == 0) {
+ d.add(new StringField("high_freq", "5", Field.Store.NO));
+ }
+ writer.addDocument(d);
+ }
+ Merges.forceMerge(writer, 1);
+ AtomicReaderContext context = refreshReader();
+ String[] formats = new String[] { "fst", "paged_bytes"};
+
+ for (String format : formats) {
+ {
+ ifdService.clear();
+ FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)
+ .put("filter.frequency.min_segment_size", 100).put("filter.frequency.min", 0.0d).put("filter.frequency.max", random.nextBoolean() ? 100 : 0.5d));
+ IndexFieldData<?> fieldData = getForField(fieldDataType, "high_freq");
+ AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> loadDirect = (WithOrdinals<Strings>) fieldData.loadDirect(context);
+ BytesValues.WithOrdinals bytesValues = loadDirect.getBytesValues(randomBoolean());
+ Docs ordinals = bytesValues.ordinals();
+ assertThat(2L, equalTo(ordinals.getNumOrds()));
+ assertThat(1000, equalTo(ordinals.getNumDocs()));
+ assertThat(bytesValues.getValueByOrd(1).utf8ToString(), equalTo("10"));
+ assertThat(bytesValues.getValueByOrd(2).utf8ToString(), equalTo("100"));
+ }
+ {
+ ifdService.clear();
+ FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)
+ .put("filter.frequency.min_segment_size", 100).put("filter.frequency.min", random.nextBoolean() ? 101 : 101d/200.0d).put("filter.frequency.max", 201));
+ IndexFieldData<?> fieldData = getForField(fieldDataType, "high_freq");
+ AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> loadDirect = (WithOrdinals<Strings>) fieldData.loadDirect(context);
+ BytesValues.WithOrdinals bytesValues = loadDirect.getBytesValues(randomBoolean());
+ Docs ordinals = bytesValues.ordinals();
+ assertThat(1L, equalTo(ordinals.getNumOrds()));
+ assertThat(1000, equalTo(ordinals.getNumDocs()));
+ assertThat(bytesValues.getValueByOrd(1).utf8ToString(), equalTo("5"));
+ }
+
+ {
+ ifdService.clear(); // test # docs with value
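+ // med_freq occurs in only 100 documents, below min_segment_size = 101, so the
+ // frequency filter is skipped entirely and both terms survive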
+ FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)
+ .put("filter.frequency.min_segment_size", 101).put("filter.frequency.min", random.nextBoolean() ? 101 : 101d/200.0d));
+ IndexFieldData<?> fieldData = getForField(fieldDataType, "med_freq");
+ AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> loadDirect = (WithOrdinals<Strings>) fieldData.loadDirect(context);
+ BytesValues.WithOrdinals bytesValues = loadDirect.getBytesValues(randomBoolean());
+ Docs ordinals = bytesValues.ordinals();
+ assertThat(2L, equalTo(ordinals.getNumOrds()));
+ assertThat(1000, equalTo(ordinals.getNumDocs()));
+ assertThat(bytesValues.getValueByOrd(1).utf8ToString(), equalTo("10"));
+ assertThat(bytesValues.getValueByOrd(2).utf8ToString(), equalTo("100"));
+ }
+
+ {
+ ifdService.clear();
+ FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)
+ .put("filter.regex.pattern", "\\d{2,3}") // allows 10 & 100
+ .put("filter.frequency.min_segment_size", 0)
+ .put("filter.frequency.min", random.nextBoolean() ? 1 : 1d/200.0d) // 100, 10, 5
+ .put("filter.frequency.max", random.nextBoolean() ? 99 : 99d/200.0d)); // 100
+ IndexFieldData<?> fieldData = getForField(fieldDataType, "high_freq");
+ AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> loadDirect = (WithOrdinals<Strings>) fieldData.loadDirect(context);
+ BytesValues.WithOrdinals bytesValues = loadDirect.getBytesValues(randomBoolean());
+ Docs ordinals = bytesValues.ordinals();
+ assertThat(1L, equalTo(ordinals.getNumOrds()));
+ assertThat(1000, equalTo(ordinals.getNumDocs()));
+ assertThat(bytesValues.getValueByOrd(1).utf8ToString(), equalTo("100"));
+ }
+ }
+
+ }
+
+ @Test
+ public void testFilterByRegExp() throws Exception {
+ for (int i = 0; i < 1000; i++) {
+ Document d = new Document();
+ d.add(new StringField("id", "" + i, Field.Store.NO));
+ if (i % 100 == 0) {
+ d.add(new StringField("high_freq", "100", Field.Store.NO));
+ }
+ if (i % 10 == 0) {
+ d.add(new StringField("high_freq", "10", Field.Store.NO));
+ }
+ if (i % 5 == 0) {
+ d.add(new StringField("high_freq", "5", Field.Store.NO));
+ }
+ writer.addDocument(d);
+ }
+ Merges.forceMerge(writer, 1);
+ AtomicReaderContext context = refreshReader();
+ String[] formats = new String[] { "fst", "paged_bytes"};
+ for (String format : formats) {
+ {
+ ifdService.clear();
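+ // "\d" matches only single-character terms, so "5" is the sole surviving value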
+ FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)
+ .put("filter.regex.pattern", "\\d"));
+ IndexFieldData<?> fieldData = getForField(fieldDataType, "high_freq");
+ AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> loadDirect = (WithOrdinals<Strings>) fieldData.loadDirect(context);
+ BytesValues.WithOrdinals bytesValues = loadDirect.getBytesValues(randomBoolean());
+ Docs ordinals = bytesValues.ordinals();
+ assertThat(1L, equalTo(ordinals.getNumOrds()));
+ assertThat(1000, equalTo(ordinals.getNumDocs()));
+ assertThat(bytesValues.getValueByOrd(1).utf8ToString(), equalTo("5"));
+ }
+ {
+ ifdService.clear();
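+ // "\d{1,2}" matches one- and two-digit terms, keeping "5" and "10" and filtering "100"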
+ FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)
+ .put("filter.regex.pattern", "\\d{1,2}"));
+ IndexFieldData<?> fieldData = getForField(fieldDataType, "high_freq");
+ AtomicFieldData.WithOrdinals<ScriptDocValues.Strings> loadDirect = (WithOrdinals<Strings>) fieldData.loadDirect(context);
+ BytesValues.WithOrdinals bytesValues = loadDirect.getBytesValues(randomBoolean());
+ Docs ordinals = bytesValues.ordinals();
+ assertThat(2L, equalTo(ordinals.getNumOrds()));
+ assertThat(1000, equalTo(ordinals.getNumDocs()));
+ assertThat(bytesValues.getValueByOrd(1).utf8ToString(), equalTo("10"));
+ assertThat(bytesValues.getValueByOrd(2).utf8ToString(), equalTo("5"));
+ }
+ }
+
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/FloatFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/FloatFieldDataTests.java
new file mode 100644
index 0000000..a91797d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/FloatFieldDataTests.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.Term;
+
+/**
+ * Field data tests for single-precision float values.
+ */
+public class FloatFieldDataTests extends AbstractNumericFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("float");
+ }
+
+ protected String one() {
+ return "1.0";
+ }
+
+ protected String two() {
+ return "2.0";
+ }
+
+ protected String three() {
+ return "3.0";
+ }
+
+ protected String four() {
+ return "4.0";
+ }
+
+ protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new FloatField("value", 4.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ writer.commit();
+
+ writer.deleteDocuments(new Term("_id", "1"));
+ }
+
+ @Override
+ protected void fillSingleValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new FloatField("value", 1.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3.0f, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillSingleValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING....
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3.0f, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ d.add(new FloatField("value", 4.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new FloatField("value", 1.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3.0f, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ d.add(new FloatField("value", 4.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3.0f, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void fillExtendedMvSet() throws Exception {
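+ // commit between batches so the documents end up spread over multiple segments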
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2, Field.Store.NO));
+ d.add(new FloatField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "4", Field.Store.NO));
+ d.add(new FloatField("value", 4, Field.Store.NO));
+ d.add(new FloatField("value", 5, Field.Store.NO));
+ d.add(new FloatField("value", 6, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "5", Field.Store.NO));
+ d.add(new FloatField("value", 6, Field.Store.NO));
+ d.add(new FloatField("value", 7, Field.Store.NO));
+ d.add(new FloatField("value", 8, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "6", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "7", Field.Store.NO));
+ d.add(new FloatField("value", 8, Field.Store.NO));
+ d.add(new FloatField("value", 9, Field.Store.NO));
+ d.add(new FloatField("value", 10, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "8", Field.Store.NO));
+ d.add(new FloatField("value", -8, Field.Store.NO));
+ d.add(new FloatField("value", -9, Field.Store.NO));
+ d.add(new FloatField("value", -10, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java b/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java
new file mode 100644
index 0000000..2f85936
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.plain.*;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.Mapper.BuilderContext;
+import org.elasticsearch.index.mapper.MapperBuilders;
+import org.elasticsearch.index.mapper.core.*;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+public class IndexFieldDataServiceTests extends ElasticsearchTestCase {
+
+ private static final Settings DOC_VALUES_SETTINGS = ImmutableSettings.builder().put(FieldDataType.FORMAT_KEY, FieldDataType.DOC_VALUES_FORMAT_VALUE).build();
+
+ @SuppressWarnings("unchecked")
+ public void testGetForFieldDefaults() {
+ final IndexFieldDataService ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
+ for (boolean docValues : Arrays.asList(true, false)) {
+ final BuilderContext ctx = new BuilderContext(null, new ContentPath(1));
+ final StringFieldMapper stringMapper = new StringFieldMapper.Builder("string").tokenized(false).fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx);
+ ifdService.clear();
+ IndexFieldData<?> fd = ifdService.getForField(stringMapper);
+ if (docValues) {
+ assertTrue(fd instanceof SortedSetDVBytesIndexFieldData);
+ } else {
+ assertTrue(fd instanceof PagedBytesIndexFieldData);
+ }
+
+ for (FieldMapper<?> mapper : Arrays.asList(
+ new ByteFieldMapper.Builder("int").fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx),
+ new ShortFieldMapper.Builder("int").fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx),
+ new IntegerFieldMapper.Builder("int").fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx),
+ new LongFieldMapper.Builder("long").fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx)
+ )) {
+ ifdService.clear();
+ fd = ifdService.getForField(mapper);
+ if (docValues) {
+ assertTrue(fd instanceof BinaryDVNumericIndexFieldData);
+ } else {
+ assertTrue(fd instanceof PackedArrayIndexFieldData);
+ }
+ }
+
+ final FloatFieldMapper floatMapper = new FloatFieldMapper.Builder("float").fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx);
+ ifdService.clear();
+ fd = ifdService.getForField(floatMapper);
+ if (docValues) {
+ assertTrue(fd instanceof BinaryDVNumericIndexFieldData);
+ } else {
+ assertTrue(fd instanceof FloatArrayIndexFieldData);
+ }
+
+ final DoubleFieldMapper doubleMapper = new DoubleFieldMapper.Builder("double").fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx);
+ ifdService.clear();
+ fd = ifdService.getForField(doubleMapper);
+ if (docValues) {
+ assertTrue(fd instanceof BinaryDVNumericIndexFieldData);
+ } else {
+ assertTrue(fd instanceof DoubleArrayIndexFieldData);
+ }
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public void testByPassDocValues() {
+ final IndexFieldDataService ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
+ final BuilderContext ctx = new BuilderContext(null, new ContentPath(1));
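+ // the second fieldDataSettings call replaces the first, so an explicit format
+ // setting wins over the doc_values format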
+ final StringFieldMapper stringMapper = MapperBuilders.stringField("string").tokenized(false).fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(ImmutableSettings.builder().put("format", "fst").build()).build(ctx);
+ ifdService.clear();
+ IndexFieldData<?> fd = ifdService.getForField(stringMapper);
+ assertTrue(fd instanceof FSTBytesIndexFieldData);
+
+ final Settings fdSettings = ImmutableSettings.builder().put("format", "array").build();
+ for (FieldMapper<?> mapper : Arrays.asList(
+ new ByteFieldMapper.Builder("int").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx),
+ new ShortFieldMapper.Builder("int").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx),
+ new IntegerFieldMapper.Builder("int").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx),
+ new LongFieldMapper.Builder("long").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx)
+ )) {
+ ifdService.clear();
+ fd = ifdService.getForField(mapper);
+ assertTrue(fd instanceof PackedArrayIndexFieldData);
+ }
+
+ final FloatFieldMapper floatMapper = MapperBuilders.floatField("float").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx);
+ ifdService.clear();
+ fd = ifdService.getForField(floatMapper);
+ assertTrue(fd instanceof FloatArrayIndexFieldData);
+
+ final DoubleFieldMapper doubleMapper = MapperBuilders.doubleField("double").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx);
+ ifdService.clear();
+ fd = ifdService.getForField(doubleMapper);
+ assertTrue(fd instanceof DoubleArrayIndexFieldData);
+ }
+
+ public void testChangeFieldDataFormat() throws Exception {
+ final IndexFieldDataService ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
+ final BuilderContext ctx = new BuilderContext(null, new ContentPath(1));
+ final StringFieldMapper mapper1 = MapperBuilders.stringField("s").tokenized(false).fieldDataSettings(ImmutableSettings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx);
+ final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new KeywordAnalyzer()));
+ Document doc = new Document();
+ doc.add(new StringField("s", "thisisastring", Store.NO));
+ writer.addDocument(doc);
+ final IndexReader reader1 = DirectoryReader.open(writer, true);
+ IndexFieldData<?> ifd = ifdService.getForField(mapper1);
+ assertThat(ifd, instanceOf(PagedBytesIndexFieldData.class));
+ Set<AtomicReader> oldSegments = Collections.newSetFromMap(new IdentityHashMap<AtomicReader, Boolean>());
+ for (AtomicReaderContext arc : reader1.leaves()) {
+ oldSegments.add(arc.reader());
+ AtomicFieldData<?> afd = ifd.load(arc);
+ assertThat(afd, instanceOf(PagedBytesAtomicFieldData.class));
+ }
+ // write new segment
+ writer.addDocument(doc);
+ final IndexReader reader2 = DirectoryReader.open(writer, true);
+ final StringFieldMapper mapper2 = MapperBuilders.stringField("s").tokenized(false).fieldDataSettings(ImmutableSettings.builder().put(FieldDataType.FORMAT_KEY, "fst").build()).build(ctx);
+ ifdService.onMappingUpdate();
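+ // after the mapping update, segments that were already loaded keep their paged_bytes
+ // field data, while segments seen for the first time pick up the new fst format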
+ ifd = ifdService.getForField(mapper2);
+ assertThat(ifd, instanceOf(FSTBytesIndexFieldData.class));
+ for (AtomicReaderContext arc : reader2.leaves()) {
+ AtomicFieldData<?> afd = ifd.load(arc);
+ if (oldSegments.contains(arc.reader())) {
+ assertThat(afd, instanceOf(PagedBytesAtomicFieldData.class));
+ } else {
+ assertThat(afd, instanceOf(FSTBytesAtomicFieldData.class));
+ }
+ }
+ reader1.close();
+ reader2.close();
+ writer.close();
+ writer.getDirectory().close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/LongFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/LongFieldDataTests.java
new file mode 100644
index 0000000..288cfea
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/LongFieldDataTests.java
@@ -0,0 +1,416 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import com.carrotsearch.hppc.DoubleOpenHashSet;
+import com.carrotsearch.hppc.LongOpenHashSet;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.Term;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.fielddata.plain.PackedArrayAtomicFieldData;
+import org.elasticsearch.index.merge.Merges;
+import org.joda.time.DateTimeZone;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for all integer types (byte, short, int, long).
+ */
+public class LongFieldDataTests extends AbstractNumericFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ // we don't want to optimize the type so it will always be a long...
+ return new FieldDataType("long", ImmutableSettings.builder());
+ }
+
+ protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ writer.commit();
+
+ writer.deleteDocuments(new Term("_id", "1"));
+ }
+
+ @Test
+ public void testOptimizeTypeLong() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", Integer.MAX_VALUE + 1l, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new LongField("value", Integer.MIN_VALUE - 1l, Field.Store.NO));
+ writer.addDocument(d);
+
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+ assertThat(fieldData, instanceOf(PackedArrayAtomicFieldData.class));
+ assertThat(getFirst(fieldData.getLongValues(), 0), equalTo((long) Integer.MAX_VALUE + 1L));
+ assertThat(getFirst(fieldData.getLongValues(), 1), equalTo((long) Integer.MIN_VALUE - 1L));
+ }
+
+ private static long getFirst(LongValues values, int docId) {
+ final int numValues = values.setDocument(docId);
+ assertThat(numValues, is(1));
+ return values.nextValue();
+ }
+
+ private static double getFirst(DoubleValues values, int docId) {
+ final int numValues = values.setDocument(docId);
+ assertThat(numValues, is(1));
+ return values.nextValue();
+ }
+
+ @Test
+ public void testDateScripts() throws Exception {
+ fillSingleValueAllSet();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ ScriptDocValues.Longs scriptValues = (ScriptDocValues.Longs) fieldData.getScriptValues();
+ scriptValues.setNextDocId(0);
+ assertThat(scriptValues.getValue(), equalTo(2L));
+ assertThat(scriptValues.getDate().getMillis(), equalTo(2L));
+ assertThat(scriptValues.getDate().getZone(), equalTo(DateTimeZone.UTC));
+ }
+
+ @Override
+ protected void fillSingleValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new LongField("value", 1, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillSingleValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING....
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new LongField("value", 1, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ protected void fillExtendedMvSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "4", Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ d.add(new LongField("value", 5, Field.Store.NO));
+ d.add(new LongField("value", 6, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "5", Field.Store.NO));
+ d.add(new LongField("value", 6, Field.Store.NO));
+ d.add(new LongField("value", 7, Field.Store.NO));
+ d.add(new LongField("value", 8, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "6", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "7", Field.Store.NO));
+ d.add(new LongField("value", 8, Field.Store.NO));
+ d.add(new LongField("value", 9, Field.Store.NO));
+ d.add(new LongField("value", 10, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "8", Field.Store.NO));
+ d.add(new LongField("value", -8, Field.Store.NO));
+ d.add(new LongField("value", -9, Field.Store.NO));
+ d.add(new LongField("value", -10, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ private static final int SECONDS_PER_YEAR = 60 * 60 * 24 * 365;
+
+ // TODO: use random() when migrating to JUnit
+ public enum Data {
+ SINGLE_VALUED_DENSE_ENUM {
+ public int numValues(Random r) {
+ return 1;
+ }
+ @Override
+ public long nextValue(Random r) {
+ return 1 + r.nextInt(16);
+ }
+ },
+ SINGLE_VALUED_DENSE_DATE {
+ public int numValues(Random r) {
+ return 1;
+ }
+ @Override
+ public long nextValue(Random r) {
+ // somewhere in-between 2010 and 2012
+ return 1000L * (40L * SECONDS_PER_YEAR + r.nextInt(2 * SECONDS_PER_YEAR));
+ }
+ },
+ MULTI_VALUED_DATE {
+ public int numValues(Random r) {
+ return r.nextInt(3);
+ }
+ @Override
+ public long nextValue(Random r) {
+ // somewhere in-between 2010 and 2012
+ return 1000L * (40L * SECONDS_PER_YEAR + r.nextInt(2 * SECONDS_PER_YEAR));
+ }
+ },
+ MULTI_VALUED_ENUM {
+ public int numValues(Random r) {
+ return r.nextInt(3);
+ }
+ @Override
+ public long nextValue(Random r) {
+ return 3 + r.nextInt(8);
+ }
+ },
+ SINGLE_VALUED_SPARSE_RANDOM {
+ public int numValues(Random r) {
+ return r.nextFloat() < 0.1f ? 1 : 0;
+ }
+ @Override
+ public long nextValue(Random r) {
+ return r.nextLong();
+ }
+ },
+ MULTI_VALUED_SPARSE_RANDOM {
+ public int numValues(Random r) {
+ return r.nextFloat() < 0.1f ? 1 + r.nextInt(5) : 0;
+ }
+ @Override
+ public long nextValue(Random r) {
+ return r.nextLong();
+ }
+ },
+ MULTI_VALUED_DENSE_RANDOM {
+ public int numValues(Random r) {
+ return 1 + r.nextInt(3);
+ }
+ @Override
+ public long nextValue(Random r) {
+ return r.nextLong();
+ }
+ };
+ public abstract int numValues(Random r);
+ public abstract long nextValue(Random r);
+ }
+
+ private void test(List<LongOpenHashSet> values) throws Exception {
+ StringField id = new StringField("_id", "", Field.Store.NO);
+
+ for (int i = 0; i < values.size(); ++i) {
+ Document doc = new Document();
+ id.setStringValue("" + i);
+ doc.add(id);
+ final LongOpenHashSet v = values.get(i);
+ final boolean[] states = v.allocated;
+ final long[] keys = v.keys;
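+ // hppc exposes its open-addressed hash storage directly: slots where
+ // states[j] is true hold live keys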
+
+ for (int j = 0; j < states.length; j++) {
+ if (states[j]) {
+ LongField value = new LongField("value", keys[j], Field.Store.NO);
+ doc.add(value);
+ }
+ }
+ writer.addDocument(doc);
+ }
+ Merges.forceMerge(writer, 1);
+
+ final IndexNumericFieldData indexFieldData = getForField("value");
+ final AtomicNumericFieldData atomicFieldData = indexFieldData.load(refreshReader());
+ final LongValues data = atomicFieldData.getLongValues();
+ final DoubleValues doubleData = atomicFieldData.getDoubleValues();
+ final LongOpenHashSet set = new LongOpenHashSet();
+ final DoubleOpenHashSet doubleSet = new DoubleOpenHashSet();
+ for (int i = 0; i < values.size(); ++i) {
+ final LongOpenHashSet v = values.get(i);
+
+ assertThat(data.setDocument(i) > 0, equalTo(!v.isEmpty()));
+ assertThat(doubleData.setDocument(i) > 0, equalTo(!v.isEmpty()));
+
+ set.clear();
+ int numValues = data.setDocument(i);
+ for (int j = 0; j < numValues; j++) {
+ set.add(data.nextValue());
+ }
+ assertThat(set, equalTo(v));
+
+ final DoubleOpenHashSet doubleV = new DoubleOpenHashSet();
+ final boolean[] states = v.allocated;
+ final long[] keys = v.keys;
+ for (int j = 0; j < states.length; j++) {
+ if (states[j]) {
+ doubleV.add((double) keys[j]);
+ }
+ }
+ doubleSet.clear();
+ numValues = doubleData.setDocument(i);
+ double prev = 0;
+ for (int j = 0; j < numValues; j++) {
+ double current;
+ doubleSet.add(current = doubleData.nextValue());
+ if (j > 0) {
+ assertThat(prev, lessThan(current));
+ }
+ prev = current;
+ }
+ assertThat(doubleSet, equalTo(doubleV));
+ }
+ }
+
+ private void test(Data data) throws Exception {
+ Random r = getRandom();
+ final int numDocs = 1000 + r.nextInt(19000);
+ final List<LongOpenHashSet> values = new ArrayList<LongOpenHashSet>(numDocs);
+ for (int i = 0; i < numDocs; ++i) {
+ final int numValues = data.numValues(r);
+ final LongOpenHashSet vals = new LongOpenHashSet(numValues);
+ for (int j = 0; j < numValues; ++j) {
+ vals.add(data.nextValue(r));
+ }
+ values.add(vals);
+ }
+ test(values);
+ }
+
+ public void testSingleValuedDenseEnum() throws Exception {
+ test(Data.SINGLE_VALUED_DENSE_ENUM);
+ }
+
+ public void testSingleValuedDenseDate() throws Exception {
+ test(Data.SINGLE_VALUED_DENSE_DATE);
+ }
+
+ public void testSingleValuedSparseRandom() throws Exception {
+ test(Data.SINGLE_VALUED_SPARSE_RANDOM);
+ }
+
+ public void testMultiValuedDate() throws Exception {
+ test(Data.MULTI_VALUED_DATE);
+ }
+
+ public void testMultiValuedEnum() throws Exception {
+ test(Data.MULTI_VALUED_ENUM);
+ }
+
+ public void testMultiValuedSparseRandom() throws Exception {
+ test(Data.MULTI_VALUED_SPARSE_RANDOM);
+ }
+
+ public void testMultiValuedDenseRandom() throws Exception {
+ test(Data.MULTI_VALUED_DENSE_RANDOM);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java
new file mode 100644
index 0000000..0eca185
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.mapper.FieldMapper.Names;
+
+/** Returns a paged-bytes-based implementation that does not implement WithOrdinals, in order to exercise different code paths,
+ * e.g. BytesRefFieldComparatorSource makes decisions based on whether the field data implements WithOrdinals. */
+public class NoOrdinalsStringFieldDataTests extends PagedBytesStringFieldDataTests {
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public IndexFieldData<AtomicFieldData<ScriptDocValues>> getForField(String fieldName) {
+ final IndexFieldData<?> in = super.getForField(fieldName);
+ return new IndexFieldData<AtomicFieldData<ScriptDocValues>>() {
+
+ @Override
+ public Index index() {
+ return in.index();
+ }
+
+ @Override
+ public Names getFieldNames() {
+ return in.getFieldNames();
+ }
+
+ @Override
+ public boolean valuesOrdered() {
+ return in.valuesOrdered();
+ }
+
+ @Override
+ public AtomicFieldData<ScriptDocValues> load(AtomicReaderContext context) {
+ return in.load(context);
+ }
+
+ @Override
+ public AtomicFieldData<ScriptDocValues> loadDirect(AtomicReaderContext context) throws Exception {
+ return in.loadDirect(context);
+ }
+
+ @Override
+ public XFieldComparatorSource comparatorSource(Object missingValue, SortMode sortMode) {
+ return new BytesRefFieldComparatorSource(this, missingValue, sortMode);
+ }
+
+ @Override
+ public void clear() {
+ in.clear();
+ }
+
+ @Override
+ public void clear(IndexReader reader) {
+ in.clear(reader);
+ }
+
+ };
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java b/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java
new file mode 100644
index 0000000..7496ce1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+
+/**
+ * String field data tests for the paged_bytes format.
+ */
+public class PagedBytesStringFieldDataTests extends AbstractStringFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("string", ImmutableSettings.builder().put("format", "paged_bytes"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java b/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java
new file mode 100644
index 0000000..eb71564
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java
@@ -0,0 +1,289 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.ordinals;
+
+import org.apache.lucene.util.LongsRef;
+import org.apache.lucene.util.packed.PackedInts;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for multi-valued ordinals built through OrdinalsBuilder.
+ */
+public class MultiOrdinalsTests extends ElasticsearchTestCase {
+
+ protected final Ordinals creationMultiOrdinals(OrdinalsBuilder builder) {
+ return this.creationMultiOrdinals(builder, ImmutableSettings.builder());
+ }
+
+ protected Ordinals creationMultiOrdinals(OrdinalsBuilder builder, ImmutableSettings.Builder settings) {
+ return builder.build(settings.build());
+ }
+
+ @Test
+ public void testRandomValues() throws IOException {
+ Random random = getRandom();
+ int numDocs = 100 + random.nextInt(1000);
+ int numOrdinals = 1 + random.nextInt(200);
+ int numValues = 100 + random.nextInt(100000);
+ OrdinalsBuilder builder = new OrdinalsBuilder(numDocs);
+ Set<OrdAndId> ordsAndIdSet = new HashSet<OrdAndId>();
+ for (int i = 0; i < numValues; i++) {
+ ordsAndIdSet.add(new OrdAndId(1 + random.nextInt(numOrdinals), random.nextInt(numDocs)));
+ }
+ List<OrdAndId> ordsAndIds = new ArrayList<OrdAndId>(ordsAndIdSet);
+ Collections.sort(ordsAndIds, new Comparator<OrdAndId>() {
+
+ @Override
+ public int compare(OrdAndId o1, OrdAndId o2) {
+ if (o1.ord < o2.ord) {
+ return -1;
+ }
+ if (o1.ord == o2.ord) {
+ if (o1.id < o2.id) {
+ return -1;
+ }
+ if (o1.id > o2.id) {
+ return 1;
+ }
+ return 0;
+ }
+ return 1;
+ }
+ });
+ long lastOrd = -1;
+ for (OrdAndId ordAndId : ordsAndIds) {
+ if (lastOrd != ordAndId.ord) {
+ lastOrd = ordAndId.ord;
+ builder.nextOrdinal();
+ }
+ ordAndId.ord = builder.currentOrdinal(); // remap to the builder's dense ordinals, since the random data may leave gaps
+ builder.addDoc(ordAndId.id);
+ }
+
+ Collections.sort(ordsAndIds, new Comparator<OrdAndId>() {
+
+ @Override
+ public int compare(OrdAndId o1, OrdAndId o2) {
+ if (o1.id < o2.id) {
+ return -1;
+ }
+ if (o1.id == o2.id) {
+ if (o1.ord < o2.ord) {
+ return -1;
+ }
+ if (o1.ord > o2.ord) {
+ return 1;
+ }
+ return 0;
+ }
+ return 1;
+ }
+ });
+ Ordinals ords = creationMultiOrdinals(builder);
+ Ordinals.Docs docs = ords.ordinals();
+ int docId = ordsAndIds.get(0).id;
+ List<Long> docOrds = new ArrayList<Long>();
+ for (OrdAndId ordAndId : ordsAndIds) {
+ if (docId == ordAndId.id) {
+ docOrds.add(ordAndId.ord);
+ } else {
+ if (!docOrds.isEmpty()) {
+ assertThat(docs.getOrd(docId), equalTo(docOrds.get(0)));
+ LongsRef ref = docs.getOrds(docId);
+ assertThat(ref.offset, equalTo(0));
+
+ for (int i = ref.offset; i < ref.length; i++) {
+ assertThat("index: " + i + " offset: " + ref.offset + " len: " + ref.length, ref.longs[i], equalTo(docOrds.get(i)));
+ }
+ final long[] array = new long[docOrds.size()];
+ for (int i = 0; i < array.length; i++) {
+ array[i] = docOrds.get(i);
+ }
+ assertIter(docs, docId, array);
+ }
+ for (int i = docId + 1; i < ordAndId.id; i++) {
+ assertThat(docs.getOrd(i), equalTo(0L));
+ }
+ docId = ordAndId.id;
+ docOrds.clear();
+ docOrds.add(ordAndId.ord);
+
+ }
+ }
+ // the loop above only verifies a document's ordinals once the next document
+ // is reached, so check the final document explicitly
+ if (!docOrds.isEmpty()) {
+ assertThat(docs.getOrd(docId), equalTo(docOrds.get(0)));
+ final long[] array = new long[docOrds.size()];
+ for (int i = 0; i < array.length; i++) {
+ array[i] = docOrds.get(i);
+ }
+ assertIter(docs, docId, array);
+ }
+ }
+
+ public static class OrdAndId {
+ long ord;
+ final int id;
+
+ public OrdAndId(long ord, int id) {
+ this.ord = ord;
+ this.id = id;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + id;
+ result = prime * result + (int) ord;
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (getClass() != obj.getClass()) {
+ return false;
+ }
+ OrdAndId other = (OrdAndId) obj;
+ if (id != other.id) {
+ return false;
+ }
+ if (ord != other.ord) {
+ return false;
+ }
+ return true;
+ }
+ }
+
+ @Test
+ public void testOrdinals() throws Exception {
+ int maxDoc = 7;
+ long maxOrds = 32;
+ OrdinalsBuilder builder = new OrdinalsBuilder(maxDoc);
+ builder.nextOrdinal(); // 1
+ builder.addDoc(1).addDoc(4).addDoc(5).addDoc(6);
+ builder.nextOrdinal(); // 2
+ builder.addDoc(0).addDoc(5).addDoc(6);
+ builder.nextOrdinal(); // 3
+ builder.addDoc(2).addDoc(4).addDoc(5).addDoc(6);
+ builder.nextOrdinal(); // 4
+ builder.addDoc(0).addDoc(4).addDoc(5).addDoc(6);
+ builder.nextOrdinal(); // 5
+ builder.addDoc(4).addDoc(5).addDoc(6);
+ long ord = builder.nextOrdinal(); // 6
+ builder.addDoc(4).addDoc(5).addDoc(6);
+ for (long i = ord; i < maxOrds; i++) {
+ builder.nextOrdinal();
+ builder.addDoc(5).addDoc(6);
+ }
+
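+ // expected ordinals per document (rows are docs 0..6), derived from the addDoc calls above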
+ long[][] ordinalPlan = new long[][]{
+ {2, 4},
+ {1},
+ {3},
+ {},
+ {1, 3, 4, 5, 6},
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
+ };
+
+ Ordinals ordinals = creationMultiOrdinals(builder);
+ Ordinals.Docs docs = ordinals.ordinals();
+ assertEquals(docs, ordinalPlan);
+ }
+
+ protected static void assertIter(Ordinals.Docs docs, int docId, long... expectedOrdinals) {
+ assertThat(docs.setDocument(docId), equalTo(expectedOrdinals.length));
+ for (long expectedOrdinal : expectedOrdinals) {
+ assertThat(docs.nextOrd(), equalTo(expectedOrdinal));
+ }
+ }
+
+ @Test
+ public void testMultiValuesDocsWithOverlappingStorageArrays() throws Exception {
+ int maxDoc = 7;
+ long maxOrds = 15;
+ OrdinalsBuilder builder = new OrdinalsBuilder(maxDoc);
+ for (int i = 0; i < maxOrds; i++) {
+ builder.nextOrdinal();
+ if (i < 10) {
+ builder.addDoc(0);
+ }
+ builder.addDoc(1);
+ if (i == 0) {
+ builder.addDoc(2);
+ }
+ if (i < 5) {
+ builder.addDoc(3);
+
+ }
+ if (i < 6) {
+ builder.addDoc(4);
+
+ }
+ if (i == 1) {
+ builder.addDoc(5);
+ }
+ if (i < 10) {
+ builder.addDoc(6);
+ }
+ }
+
+ long[][] ordinalPlan = new long[][]{
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {1},
+ {1, 2, 3, 4, 5},
+ {1, 2, 3, 4, 5, 6},
+ {2},
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+ };
+
+ Ordinals ordinals = new MultiOrdinals(builder, PackedInts.FASTEST);
+ Ordinals.Docs docs = ordinals.ordinals();
+ assertEquals(docs, ordinalPlan);
+ }
+
+ private void assertEquals(Ordinals.Docs docs, long[][] ordinalPlan) {
+ long numOrds = 0;
+ for (int doc = 0; doc < ordinalPlan.length; ++doc) {
+ if (ordinalPlan[doc].length > 0) {
+ numOrds = Math.max(numOrds, ordinalPlan[doc][ordinalPlan[doc].length - 1]);
+ }
+ }
+ assertThat(docs.getNumDocs(), equalTo(ordinalPlan.length));
+ assertThat(docs.getNumOrds(), equalTo(numOrds)); // Includes null ord
+ assertThat(docs.getMaxOrd(), equalTo(numOrds + 1));
+ assertThat(docs.isMultiValued(), equalTo(true));
+ for (int doc = 0; doc < ordinalPlan.length; ++doc) {
+ LongsRef ref = docs.getOrds(doc);
+ assertThat(ref.offset, equalTo(0));
+ long[] ords = ordinalPlan[doc];
+ assertThat(ref, equalTo(new LongsRef(ords, 0, ords.length)));
+ assertIter(docs, doc, ords);
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java b/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java
new file mode 100644
index 0000000..2dea5cc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.ordinals;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for single-valued ordinals and the fallback to a multi-valued representation.
+ */
+public class SingleOrdinalsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSvValues() throws IOException {
+ int numDocs = 1000000;
+ int numOrdinals = numDocs / 4;
+ Map<Integer, Long> controlDocToOrdinal = new HashMap<Integer, Long>();
+ OrdinalsBuilder builder = new OrdinalsBuilder(numDocs);
+ long ordinal = builder.nextOrdinal();
+ for (int doc = 0; doc < numDocs; doc++) {
+ if (doc % numOrdinals == 0) {
+ ordinal = builder.nextOrdinal();
+ }
+ controlDocToOrdinal.put(doc, ordinal);
+ builder.addDoc(doc);
+ }
+
+ Ordinals ords = builder.build(ImmutableSettings.EMPTY);
+ assertThat(ords, instanceOf(SinglePackedOrdinals.class));
+ Ordinals.Docs docs = ords.ordinals();
+
+ assertThat(controlDocToOrdinal.size(), equalTo(docs.getNumDocs()));
+ for (Map.Entry<Integer, Long> entry : controlDocToOrdinal.entrySet()) {
+ assertThat(entry.getValue(), equalTo(docs.getOrd(entry.getKey())));
+ }
+
+ }
+
+ @Test
+ public void testMvOrdinalsTrigger() throws IOException {
+ int numDocs = 1000000;
+ OrdinalsBuilder builder = new OrdinalsBuilder(numDocs);
+ builder.nextOrdinal();
+ for (int doc = 0; doc < numDocs; doc++) {
+ builder.addDoc(doc);
+ }
+
+ Ordinals ords = builder.build(ImmutableSettings.EMPTY);
+ assertThat(ords, instanceOf(SinglePackedOrdinals.class));
+
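+ // giving doc 0 a second ordinal makes it multi-valued, which must disqualify
+ // the single-packed representation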
+ builder.nextOrdinal();
+ builder.addDoc(0);
+ ords = builder.build(ImmutableSettings.EMPTY);
+ assertThat(ords, not(instanceOf(SinglePackedOrdinals.class)));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/gateway/CommitPointsTests.java b/src/test/java/org/elasticsearch/index/gateway/CommitPointsTests.java
new file mode 100644
index 0000000..f3182fa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/gateway/CommitPointsTests.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests XContent serialization round-trips of commit points.
+ */
+public class CommitPointsTests extends ElasticsearchTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(CommitPointsTests.class);
+
+ @Test
+ public void testCommitPointXContent() throws Exception {
+ ArrayList<CommitPoint.FileInfo> indexFiles = Lists.newArrayList();
+ indexFiles.add(new CommitPoint.FileInfo("file1", "file1_p", 100, "ck1"));
+ indexFiles.add(new CommitPoint.FileInfo("file2", "file2_p", 200, "ck2"));
+
+ ArrayList<CommitPoint.FileInfo> translogFiles = Lists.newArrayList();
+ translogFiles.add(new CommitPoint.FileInfo("t_file1", "t_file1_p", 100, null));
+ translogFiles.add(new CommitPoint.FileInfo("t_file2", "t_file2_p", 200, null));
+
+ CommitPoint commitPoint = new CommitPoint(1, "test", CommitPoint.Type.GENERATED, indexFiles, translogFiles);
+
+ byte[] serialized = CommitPoints.toXContent(commitPoint);
+ logger.info("serialized commit_point {}", new String(serialized, Charsets.UTF_8));
+
+ CommitPoint desCp = CommitPoints.fromXContent(serialized);
+ assertThat(desCp.version(), equalTo(commitPoint.version()));
+ assertThat(desCp.name(), equalTo(commitPoint.name()));
+
+ assertThat(desCp.indexFiles().size(), equalTo(commitPoint.indexFiles().size()));
+ for (int i = 0; i < desCp.indexFiles().size(); i++) {
+ assertThat(desCp.indexFiles().get(i).name(), equalTo(commitPoint.indexFiles().get(i).name()));
+ assertThat(desCp.indexFiles().get(i).physicalName(), equalTo(commitPoint.indexFiles().get(i).physicalName()));
+ assertThat(desCp.indexFiles().get(i).length(), equalTo(commitPoint.indexFiles().get(i).length()));
+ assertThat(desCp.indexFiles().get(i).checksum(), equalTo(commitPoint.indexFiles().get(i).checksum()));
+ }
+
+ assertThat(desCp.translogFiles().size(), equalTo(commitPoint.translogFiles().size()));
+ for (int i = 0; i < desCp.indexFiles().size(); i++) {
+ assertThat(desCp.translogFiles().get(i).name(), equalTo(commitPoint.translogFiles().get(i).name()));
+ assertThat(desCp.translogFiles().get(i).physicalName(), equalTo(commitPoint.translogFiles().get(i).physicalName()));
+ assertThat(desCp.translogFiles().get(i).length(), equalTo(commitPoint.translogFiles().get(i).length()));
+ assertThat(desCp.translogFiles().get(i).checksum(), nullValue());
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/MapperTestUtils.java b/src/test/java/org/elasticsearch/index/mapper/MapperTestUtils.java
new file mode 100644
index 0000000..5fb90e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/MapperTestUtils.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatService;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatService;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityLookupService;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+
+/**
+ * Static factory helpers for building parsers, mapper services, and analysis services in tests.
+ */
+public class MapperTestUtils {
+
+ public static DocumentMapperParser newParser() {
+ return new DocumentMapperParser(new Index("test"), ImmutableSettings.Builder.EMPTY_SETTINGS, newAnalysisService(), new PostingsFormatService(new Index("test")),
+ new DocValuesFormatService(new Index("test")), newSimilarityLookupService());
+ }
+
+ public static DocumentMapperParser newParser(Settings indexSettings) {
+ return new DocumentMapperParser(new Index("test"), indexSettings, newAnalysisService(indexSettings), new PostingsFormatService(new Index("test")),
+ new DocValuesFormatService(new Index("test")), newSimilarityLookupService());
+ }
+
+ public static MapperService newMapperService() {
+ return newMapperService(new Index("test"), ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ public static MapperService newMapperService(Index index, Settings indexSettings) {
+ return new MapperService(index, indexSettings, new Environment(), newAnalysisService(), new IndexFieldDataService(index, new DummyCircuitBreakerService()),
+ new PostingsFormatService(index), new DocValuesFormatService(index), newSimilarityLookupService());
+ }
+
+ public static AnalysisService newAnalysisService() {
+ return newAnalysisService(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ public static AnalysisService newAnalysisService(Settings indexSettings) {
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(indexSettings), new EnvironmentModule(new Environment(ImmutableSettings.Builder.EMPTY_SETTINGS)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(new Index("test"), indexSettings),
+ new IndexNameModule(new Index("test")),
+ new AnalysisModule(indexSettings, parentInjector.getInstance(IndicesAnalysisService.class))).createChildInjector(parentInjector);
+
+ return injector.getInstance(AnalysisService.class);
+ }
+
+ public static SimilarityLookupService newSimilarityLookupService() {
+ return new SimilarityLookupService(new Index("test"), ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+}
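For orientation, a minimal sketch of how the MapperTestUtils factories above are consumed by the mapper tests that follow (the inline mapping string is illustrative, not one of the committed fixtures, and DocumentMapper.type() is assumed to return the mapping's type name):

    import org.elasticsearch.index.mapper.DocumentMapper;
    import org.elasticsearch.index.mapper.MapperTestUtils;

    public class MapperTestUtilsUsageSketch {
        public static void main(String[] args) throws Exception {
            // A trivial mapping with one string field; the real tests load theirs
            // from JSON fixtures or build them with XContentFactory.
            String mapping = "{\"type\":{\"properties\":{\"field\":{\"type\":\"string\"}}}}";
            DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
            System.out.println("parsed mapping for type: " + mapper.type());
        }
    }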
diff --git a/src/test/java/org/elasticsearch/index/mapper/UidTests.java b/src/test/java/org/elasticsearch/index/mapper/UidTests.java
new file mode 100644
index 0000000..0720447
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/UidTests.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.bytes.HashedBytesArray;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+public class UidTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCreateAndSplitId() {
+ BytesRef createUid = Uid.createUidAsBytes("foo", "bar");
+ HashedBytesArray[] splitUidIntoTypeAndId = Uid.splitUidIntoTypeAndId(createUid);
+ assertThat("foo", equalTo(splitUidIntoTypeAndId[0].toUtf8()));
+ assertThat("bar", equalTo(splitUidIntoTypeAndId[1].toUtf8()));
+ // split also with an offset
+ BytesRef ref = new BytesRef(createUid.length+10);
+ ref.offset = 9;
+ ref.length = createUid.length;
+ System.arraycopy(createUid.bytes, createUid.offset, ref.bytes, ref.offset, ref.length);
+ splitUidIntoTypeAndId = Uid.splitUidIntoTypeAndId(ref);
+ assertThat("foo", equalTo(splitUidIntoTypeAndId[0].toUtf8()));
+ assertThat("bar", equalTo(splitUidIntoTypeAndId[1].toUtf8()));
+ }
+}
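The round trip above depends on the uid being laid out as type + '#' + id, and on the split honoring the BytesRef offset rather than assuming offset 0, which is what the shifted-copy half of the test exercises. A delimiter-scan sketch in plain Java, assuming the '#' layout that the assertions imply:

    import java.nio.charset.StandardCharsets;

    public class UidSplitSketch {
        // Split "type#id" within [offset, offset + length), honoring the offset.
        static String[] split(byte[] bytes, int offset, int length) {
            for (int i = offset; i < offset + length; i++) {
                if (bytes[i] == '#') {
                    return new String[] {
                            new String(bytes, offset, i - offset, StandardCharsets.UTF_8),
                            new String(bytes, i + 1, offset + length - i - 1, StandardCharsets.UTF_8)
                    };
                }
            }
            throw new IllegalArgumentException("uid has no '#' delimiter");
        }

        public static void main(String[] args) {
            byte[] uid = "foo#bar".getBytes(StandardCharsets.UTF_8);
            byte[] buf = new byte[uid.length + 10];
            System.arraycopy(uid, 0, buf, 9, uid.length); // simulate a non-zero offset
            String[] parts = split(buf, 9, uid.length);
            System.out.println(parts[0] + " / " + parts[1]); // foo / bar
        }
    }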
diff --git a/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java
new file mode 100644
index 0000000..1f15caf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.all;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.lucene.all.AllEntries;
+import org.elasticsearch.common.lucene.all.AllField;
+import org.elasticsearch.common.lucene.all.AllTermQuery;
+import org.elasticsearch.common.lucene.all.AllTokenStream;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for the {@code _all} field mapper: inclusion rules, norms, storage, term vectors, and mapping round trips.
+ */
+public class SimpleAllMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleAllMappers() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(3));
+ assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+ FieldMapper mapper = docMapper.mappers().smartNameFieldMapper("_all");
+ assertThat(field.fieldType().omitNorms(), equalTo(true));
+ assertThat(mapper.queryStringTermQuery(new Term("_all", "foobar")), Matchers.instanceOf(AllTermQuery.class));
+ }
+
+ @Test
+ public void testAllMappersNoBoost() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/noboost-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(3));
+ assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+ FieldMapper mapper = docMapper.mappers().smartNameFieldMapper("_all");
+ assertThat(field.fieldType().omitNorms(), equalTo(false));
+ assertThat(mapper.queryStringTermQuery(new Term("_all", "foobar")), Matchers.instanceOf(TermQuery.class));
+ }
+
+ @Test
+ public void testAllMappersTermQuery() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(3));
+ assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+ FieldMapper mapper = docMapper.mappers().smartNameFieldMapper("_all");
+ assertThat(field.fieldType().omitNorms(), equalTo(false));
+ assertThat(mapper.queryStringTermQuery(new Term("_all", "foobar")), Matchers.instanceOf(TermQuery.class));
+
+ }
+
+
+ @Test
+ public void testSimpleAllMappersWithReparse() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ String builtMapping = docMapper.mappingSource().string();
+ // reparse it
+ DocumentMapper builtDocMapper = MapperTestUtils.newParser().parse(builtMapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = builtDocMapper.parse(new BytesArray(json)).rootDoc();
+
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(3));
+ assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+ assertThat(field.fieldType().omitNorms(), equalTo(true));
+ }
+
+ @Test
+ public void testSimpleAllMappersWithStore() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/store-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(2));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+
+ String text = field.stringValue();
+ assertThat(text, equalTo(allEntries.buildText()));
+ assertThat(field.fieldType().omitNorms(), equalTo(false));
+ }
+
+ @Test
+ public void testSimpleAllMappersWithReparseWithStore() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/store-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ String builtMapping = docMapper.mappingSource().string();
+ // reparse it
+ DocumentMapper builtDocMapper = MapperTestUtils.newParser().parse(builtMapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = builtDocMapper.parse(new BytesArray(json)).rootDoc();
+
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(2));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+
+ String text = field.stringValue();
+ assertThat(text, equalTo(allEntries.buildText()));
+ assertThat(field.fieldType().omitNorms(), equalTo(false));
+ }
+
+ @Test
+ public void testRandom() throws Exception {
+ boolean omitNorms = false;
+ boolean stored = false;
+ boolean enabled = true;
+ boolean autoBoost = false;
+ boolean tv_stored = false;
+ boolean tv_payloads = false;
+ boolean tv_offsets = false;
+ boolean tv_positions = false;
+ String similarity = null;
+ boolean fieldData = false;
+ XContentBuilder mappingBuilder = jsonBuilder();
+ mappingBuilder.startObject().startObject("test");
+ List<Tuple<String, Boolean>> booleanOptionList = new ArrayList<Tuple<String, Boolean>>();
+ boolean allDefault = true;
+ if (frequently()) {
+ allDefault = false;
+ mappingBuilder.startObject("_all");
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("omit_norms", omitNorms = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("store", stored = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("store_term_vectors", tv_stored = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("enabled", enabled = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("auto_boost", autoBoost = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("store_term_vector_offsets", tv_offsets = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("store_term_vector_positions", tv_positions = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<String, Boolean>("store_term_vector_payloads", tv_payloads = randomBoolean()));
+ }
+ Collections.shuffle(booleanOptionList, getRandom());
+ for (Tuple<String, Boolean> option : booleanOptionList) {
+ mappingBuilder.field(option.v1(), option.v2().booleanValue());
+ }
+ tv_stored |= tv_positions || tv_payloads || tv_offsets;
+ if (randomBoolean()) {
+ mappingBuilder.field("similarity", similarity = randomBoolean() ? "BM25" : "TF/IDF");
+ }
+ if (randomBoolean()) {
+ fieldData = true;
+ mappingBuilder.startObject("fielddata");
+ mappingBuilder.field("foo", "bar");
+ mappingBuilder.endObject();
+ }
+ mappingBuilder.endObject();
+ }
+
+ String mapping = mappingBuilder.endObject().endObject().bytes().toUtf8();
+ logger.info(mapping);
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ String builtMapping = docMapper.mappingSource().string();
+ // reparse it
+ DocumentMapper builtDocMapper = MapperTestUtils.newParser().parse(builtMapping);
+
+ byte[] json = jsonBuilder().startObject()
+ .field("foo", "bar")
+ .field("_id", 1)
+ .field("foobar", "foobar")
+ .endObject().bytes().array();
+ Document doc = builtDocMapper.parse(new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ if (enabled) {
+ assertThat(field.fieldType().omitNorms(), equalTo(omitNorms));
+ assertThat(field.fieldType().stored(), equalTo(stored));
+ assertThat(field.fieldType().storeTermVectorOffsets(), equalTo(tv_offsets));
+ assertThat(field.fieldType().storeTermVectorPayloads(), equalTo(tv_payloads));
+ assertThat(field.fieldType().storeTermVectorPositions(), equalTo(tv_positions));
+ assertThat(field.fieldType().storeTermVectors(), equalTo(tv_stored));
+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();
+ assertThat(allEntries.fields().size(), equalTo(2));
+ assertThat(allEntries.fields().contains("foobar"), equalTo(true));
+ assertThat(allEntries.fields().contains("foo"), equalTo(true));
+ if (!stored) {
+ assertThat(field.stringValue(), nullValue());
+ }
+ String text = stored ? field.stringValue() : "bar foobar";
+ assertThat(text.trim(), equalTo(allEntries.buildText().trim()));
+ } else {
+ assertThat(field, nullValue());
+ }
+
+ Term term = new Term("foo", "bar");
+ Query query = builtDocMapper.allFieldMapper().queryStringTermQuery(term);
+ if (autoBoost) {
+ assertThat(query, equalTo((Query)new AllTermQuery(term)));
+ } else {
+ assertThat(query, equalTo((Query)new TermQuery(term)));
+ }
+ if (similarity == null || similarity.equals("TF/IDF")) {
+ assertThat(builtDocMapper.allFieldMapper().similarity(), nullValue());
+ } else {
+            assertThat(builtDocMapper.allFieldMapper().similarity().name(), equalTo(similarity));
+ }
+ assertThat(builtMapping.contains("fielddata"), is(fieldData));
+ if (allDefault) {
+ BytesStreamOutput bytesStreamOutput = new BytesStreamOutput(0);
+ XContentBuilder b = new XContentBuilder(XContentType.JSON.xContent(), bytesStreamOutput);
+ XContentBuilder xContentBuilder = builtDocMapper.allFieldMapper().toXContent(b, ToXContent.EMPTY_PARAMS);
+ xContentBuilder.flush();
+ assertThat(bytesStreamOutput.size(), equalTo(0));
+ }
+
+ }
+}
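One subtlety in testRandom is the line tv_stored |= tv_positions || tv_payloads || tv_offsets: Lucene only permits term-vector positions, offsets, or payloads when term vectors are stored at all, so the parsed mapping is expected to switch the parent flag on implicitly. A compact sketch of that normalization:

    public class TermVectorFlagSketch {
        // Any sub-option forces the parent store_term_vectors flag on.
        static boolean storeTermVectors(boolean stored, boolean positions,
                                        boolean offsets, boolean payloads) {
            return stored || positions || offsets || payloads;
        }

        public static void main(String[] args) {
            System.out.println(storeTermVectors(false, true, false, false));  // true
            System.out.println(storeTermVectors(false, false, false, false)); // false
        }
    }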
diff --git a/src/test/java/org/elasticsearch/index/mapper/all/mapping.json b/src/test/java/org/elasticsearch/index/mapper/all/mapping.json
new file mode 100644
index 0000000..6c2120f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/all/mapping.json
@@ -0,0 +1,57 @@
+{
+ "person":{
+ "_all":{
+ "enabled":true,
+ "omit_norms":true
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed",
+ "boost":2.0
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes",
+ "index_name":"firstLocation"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "include_in_all":true
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+} \ No newline at end of file
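Taken together, the include_in_all flags above account for the counts asserted in SimpleAllMapperTests: name.first opts out explicitly, address opts out as a whole object but address.last.location opts back in, and simple2 opts out, so exactly three values reach _all: address.last.location, name.last, and simple1.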
diff --git a/src/test/java/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json b/src/test/java/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json
new file mode 100644
index 0000000..e232192
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json
@@ -0,0 +1,56 @@
+{
+ "person":{
+ "_all":{
+ "enabled": true ,
+ "index_options" : "freqs"
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes",
+ "index_name":"firstLocation"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "include_in_all":true
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/all/noboost-mapping.json b/src/test/java/org/elasticsearch/index/mapper/all/noboost-mapping.json
new file mode 100644
index 0000000..ecbf315
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/all/noboost-mapping.json
@@ -0,0 +1,55 @@
+{
+ "person":{
+ "_all":{
+ "enabled":true
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes",
+ "index_name":"firstLocation"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "include_in_all":true
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/all/store-mapping.json b/src/test/java/org/elasticsearch/index/mapper/all/store-mapping.json
new file mode 100644
index 0000000..7fc9283
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/all/store-mapping.json
@@ -0,0 +1,56 @@
+{
+ "person":{
+ "_all":{
+ "enabled":true,
+ "store":"yes"
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed",
+ "boost":2.0
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes",
+ "index_name":"firstLocation"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string"
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/all/test1.json b/src/test/java/org/elasticsearch/index/mapper/all/test1.json
new file mode 100644
index 0000000..834400a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/all/test1.json
@@ -0,0 +1,18 @@
+{
+ "_boost":3.7,
+ "_id":"1",
+ "name":{
+ "first":"shay",
+ "last":"banon"
+ },
+ "address":{
+ "first":{
+ "location":"first location"
+ },
+ "last":{
+ "location":"last location"
+ }
+ },
+ "simple1":1,
+ "simple2":2
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/analyzer/AnalyzerMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/analyzer/AnalyzerMapperTests.java
new file mode 100644
index 0000000..9e63f65
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/analyzer/AnalyzerMapperTests.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.analyzer;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.analysis.FieldNameAnalyzer;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests for the {@code _analyzer} path mapping, which selects a per-document analyzer from a field value.
+ */
+public class AnalyzerMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testAnalyzerMapping() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_analyzer").field("path", "field_analyzer").endObject()
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .startObject("field2").field("type", "string").field("analyzer", "simple").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field_analyzer", "whitespace")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ FieldNameAnalyzer analyzer = (FieldNameAnalyzer) doc.analyzer();
+ assertThat(((NamedAnalyzer) analyzer.defaultAnalyzer()).name(), equalTo("whitespace"));
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field1")), nullValue());
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field2")).name(), equalTo("simple"));
+
+ // check that it serializes and de-serializes correctly
+
+ DocumentMapper reparsedMapper = MapperTestUtils.newParser().parse(documentMapper.mappingSource().string());
+
+ doc = reparsedMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field_analyzer", "whitespace")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ analyzer = (FieldNameAnalyzer) doc.analyzer();
+ assertThat(((NamedAnalyzer) analyzer.defaultAnalyzer()).name(), equalTo("whitespace"));
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field1")), nullValue());
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field2")).name(), equalTo("simple"));
+ }
+
+
+ @Test
+ public void testAnalyzerMappingExplicit() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_analyzer").field("path", "field_analyzer").endObject()
+ .startObject("properties")
+ .startObject("field_analyzer").field("type", "string").endObject()
+ .startObject("field1").field("type", "string").endObject()
+ .startObject("field2").field("type", "string").field("analyzer", "simple").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field_analyzer", "whitespace")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ FieldNameAnalyzer analyzer = (FieldNameAnalyzer) doc.analyzer();
+ assertThat(((NamedAnalyzer) analyzer.defaultAnalyzer()).name(), equalTo("whitespace"));
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field1")), nullValue());
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field2")).name(), equalTo("simple"));
+
+ // check that it serializes and de-serializes correctly
+
+ DocumentMapper reparsedMapper = MapperTestUtils.newParser().parse(documentMapper.mappingSource().string());
+
+ doc = reparsedMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field_analyzer", "whitespace")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ analyzer = (FieldNameAnalyzer) doc.analyzer();
+ assertThat(((NamedAnalyzer) analyzer.defaultAnalyzer()).name(), equalTo("whitespace"));
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field1")), nullValue());
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field2")).name(), equalTo("simple"));
+ }
+
+ @Test
+ public void testAnalyzerMappingNotIndexedNorStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_analyzer").field("path", "field_analyzer").endObject()
+ .startObject("properties")
+ .startObject("field_analyzer").field("type", "string").field("index", "no").field("store", "no").endObject()
+ .startObject("field1").field("type", "string").endObject()
+ .startObject("field2").field("type", "string").field("analyzer", "simple").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field_analyzer", "whitespace")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ FieldNameAnalyzer analyzer = (FieldNameAnalyzer) doc.analyzer();
+ assertThat(((NamedAnalyzer) analyzer.defaultAnalyzer()).name(), equalTo("whitespace"));
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field1")), nullValue());
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field2")).name(), equalTo("simple"));
+
+ // check that it serializes and de-serializes correctly
+
+ DocumentMapper reparsedMapper = MapperTestUtils.newParser().parse(documentMapper.mappingSource().string());
+
+ doc = reparsedMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field_analyzer", "whitespace")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ analyzer = (FieldNameAnalyzer) doc.analyzer();
+ assertThat(((NamedAnalyzer) analyzer.defaultAnalyzer()).name(), equalTo("whitespace"));
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field1")), nullValue());
+ assertThat(((NamedAnalyzer) analyzer.analyzers().get("field2")).name(), equalTo("simple"));
+ }
+}
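The FieldNameAnalyzer assertions above reduce to one rule: a field with an analyzer fixed in the mapping keeps it, and every other field falls back to the analyzer named by the document's _analyzer path field. A map-with-default sketch of that resolution in plain Java (names hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    public class PerDocumentAnalyzerSketch {
        public static void main(String[] args) {
            Map<String, String> explicit = new HashMap<String, String>();
            explicit.put("field2", "simple");  // analyzer fixed in the mapping
            String docDefault = "whitespace";  // value of the _analyzer path field

            // field1 has no explicit analyzer, so the per-document default wins.
            String field1 = explicit.containsKey("field1") ? explicit.get("field1") : docDefault;
            String field2 = explicit.containsKey("field2") ? explicit.get("field2") : docDefault;
            System.out.println(field1 + " / " + field2); // whitespace / simple
        }
    }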
diff --git a/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java
new file mode 100644
index 0000000..38e2fa1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.binary;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+/**
+ * Tests for the binary field mapper: a binary field parses and defaults to not stored. */
+public class BinaryMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDefaultMapping() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "binary")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper("field");
+ assertThat(fieldMapper, instanceOf(BinaryFieldMapper.class));
+ assertThat(fieldMapper.fieldType().stored(), equalTo(false));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/boost/BoostMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/boost/BoostMappingTests.java
new file mode 100644
index 0000000..f009344
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/boost/BoostMappingTests.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.boost;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.BoostFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the _boost field: default mapping, custom field name, and stored/indexed settings. */
+public class BoostMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDefaultMapping() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("_boost", 2.0f)
+ .field("field", "a")
+ .field("field", "b")
+ .endObject().bytes());
+
+        // only one of the same-named fields carries the document boost; the others keep 1.0
+ IndexableField[] fields = doc.rootDoc().getFields("field");
+ assertThat(fields[0].boost(), equalTo(2.0f));
+ assertThat(fields[1].boost(), equalTo(1.0f));
+ }
+
+ @Test
+ public void testCustomName() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_boost").field("name", "custom_boost").endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field", "a")
+ .field("_boost", 2.0f)
+ .endObject().bytes());
+ assertThat(doc.rootDoc().getField("field").boost(), equalTo(1.0f));
+
+ doc = mapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field", "a")
+ .field("custom_boost", 2.0f)
+ .endObject().bytes());
+ assertThat(doc.rootDoc().getField("field").boost(), equalTo(2.0f));
+ }
+
+ @Test
+ public void testDefaultValues() throws Exception {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.boostFieldMapper().fieldType().stored(), equalTo(BoostFieldMapper.Defaults.FIELD_TYPE.stored()));
+ assertThat(docMapper.boostFieldMapper().fieldType().indexed(), equalTo(BoostFieldMapper.Defaults.FIELD_TYPE.indexed()));
+ }
+
+ @Test
+ public void testSetValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_boost")
+ .field("store", "yes").field("index", "not_analyzed")
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.boostFieldMapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.boostFieldMapper().fieldType().indexed(), equalTo(true));
+ }
+}
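The comment in testDefaultMapping is worth spelling out: when the same field name occurs twice in a document, the document-level _boost lands on the first Lucene field instance only and later instances stay at 1.0. A sketch of that convention as the test observes it (plain Java, illustrative):

    import java.util.HashSet;
    import java.util.Set;

    public class FirstInstanceBoostSketch {
        // Give the document boost to the first instance of each field name.
        static float[] boosts(String[] fieldNames, float docBoost) {
            float[] out = new float[fieldNames.length];
            Set<String> seen = new HashSet<String>();
            for (int i = 0; i < fieldNames.length; i++) {
                out[i] = seen.add(fieldNames[i]) ? docBoost : 1.0f;
            }
            return out;
        }

        public static void main(String[] args) {
            float[] b = boosts(new String[] { "field", "field" }, 2.0f);
            System.out.println(b[0] + " " + b[1]); // 2.0 1.0
        }
    }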
diff --git a/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java
new file mode 100644
index 0000000..17f0920
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.boost;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class CustomBoostMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCustomBoostValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("s_field").field("type", "string").endObject()
+ .startObject("l_field").field("type", "long").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("i_field").field("type", "integer").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("sh_field").field("type", "short").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("b_field").field("type", "byte").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("d_field").field("type", "double").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("f_field").field("type", "float").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("date_field").field("type", "date").startObject("norms").field("enabled", true).endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("s_field").field("value", "s_value").field("boost", 2.0f).endObject()
+ .startObject("l_field").field("value", 1l).field("boost", 3.0f).endObject()
+ .startObject("i_field").field("value", 1).field("boost", 4.0f).endObject()
+ .startObject("sh_field").field("value", 1).field("boost", 5.0f).endObject()
+ .startObject("b_field").field("value", 1).field("boost", 6.0f).endObject()
+ .startObject("d_field").field("value", 1).field("boost", 7.0f).endObject()
+ .startObject("f_field").field("value", 1).field("boost", 8.0f).endObject()
+ .startObject("date_field").field("value", "20100101").field("boost", 9.0f).endObject()
+ .endObject().bytes());
+
+ assertThat(doc.rootDoc().getField("s_field").boost(), equalTo(2.0f));
+ assertThat(doc.rootDoc().getField("l_field").boost(), equalTo(3.0f));
+ assertThat(doc.rootDoc().getField("i_field").boost(), equalTo(4.0f));
+ assertThat(doc.rootDoc().getField("sh_field").boost(), equalTo(5.0f));
+ assertThat(doc.rootDoc().getField("b_field").boost(), equalTo(6.0f));
+ assertThat(doc.rootDoc().getField("d_field").boost(), equalTo(7.0f));
+ assertThat(doc.rootDoc().getField("f_field").boost(), equalTo(8.0f));
+ assertThat(doc.rootDoc().getField("date_field").boost(), equalTo(9.0f));
+ }
+} \ No newline at end of file
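All of the boost tests above lean on the compound field syntax: instead of "field": value, a document may send "field": {"value": ..., "boost": ...} and the mapper unwraps the pair. A minimal unwrapping sketch over an already-parsed map (plain Java, hypothetical helper):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    public class CompoundFieldSketch {
        // Returns {value, boost}; plain values get the default boost of 1.0.
        static Object[] unwrap(Object fieldSource) {
            if (fieldSource instanceof Map) {
                Map<?, ?> m = (Map<?, ?>) fieldSource;
                Object boost = m.containsKey("boost") ? m.get("boost") : Float.valueOf(1.0f);
                return new Object[] { m.get("value"), boost };
            }
            return new Object[] { fieldSource, Float.valueOf(1.0f) };
        }

        public static void main(String[] args) {
            Map<String, Object> compound = new HashMap<String, Object>();
            compound.put("value", "s_value");
            compound.put("boost", Float.valueOf(2.0f));
            System.out.println(Arrays.toString(unwrap(compound)));  // [s_value, 2.0]
            System.out.println(Arrays.toString(unwrap("s_value"))); // [s_value, 1.0]
        }
    }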
diff --git a/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java b/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java
new file mode 100644
index 0000000..c949cbb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.boost;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.closeTo;
+
+/**
+ * Tests field-level boosts supplied via the compound {"value": ..., "boost": ...} syntax. */
+public class FieldLevelBoostTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testFieldLevelBoost() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
+ .startObject("str_field").field("type", "string").endObject()
+ .startObject("int_field").field("type", "integer").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("byte_field").field("type", "byte").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("date_field").field("type", "date").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("double_field").field("type", "double").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("float_field").field("type", "float").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("long_field").field("type", "long").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("short_field").field("type", "short").startObject("norms").field("enabled", true).endObject().endObject()
+                .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference json = XContentFactory.jsonBuilder().startObject().field("_id", "1")
+ .startObject("str_field").field("boost", 2.0).field("value", "some name").endObject()
+ .startObject("int_field").field("boost", 3.0).field("value", 10).endObject()
+ .startObject("byte_field").field("boost", 4.0).field("value", 20).endObject()
+ .startObject("date_field").field("boost", 5.0).field("value", "2012-01-10").endObject()
+ .startObject("double_field").field("boost", 6.0).field("value", 30.0).endObject()
+ .startObject("float_field").field("boost", 7.0).field("value", 40.0).endObject()
+ .startObject("long_field").field("boost", 8.0).field("value", 50).endObject()
+ .startObject("short_field").field("boost", 9.0).field("value", 60).endObject()
+                .endObject().bytes();
+ Document doc = docMapper.parse(json).rootDoc();
+
+ IndexableField f = doc.getField("str_field");
+ assertThat((double) f.boost(), closeTo(2.0, 0.001));
+
+ f = doc.getField("int_field");
+ assertThat((double) f.boost(), closeTo(3.0, 0.001));
+
+ f = doc.getField("byte_field");
+ assertThat((double) f.boost(), closeTo(4.0, 0.001));
+
+ f = doc.getField("date_field");
+ assertThat((double) f.boost(), closeTo(5.0, 0.001));
+
+ f = doc.getField("double_field");
+ assertThat((double) f.boost(), closeTo(6.0, 0.001));
+
+ f = doc.getField("float_field");
+ assertThat((double) f.boost(), closeTo(7.0, 0.001));
+
+ f = doc.getField("long_field");
+ assertThat((double) f.boost(), closeTo(8.0, 0.001));
+
+ f = doc.getField("short_field");
+ assertThat((double) f.boost(), closeTo(9.0, 0.001));
+ }
+
+ @Test
+ public void testInvalidFieldLevelBoost() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
+ .startObject("str_field").field("type", "string").endObject()
+ .startObject("int_field").field("type", "integer").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("byte_field").field("type", "byte").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("date_field").field("type", "date").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("double_field").field("type", "double").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("float_field").field("type", "float").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("long_field").field("type", "long").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("short_field").field("type", "short").startObject("norms").field("enabled", true).endObject().endObject()
+                .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ try {
+ docMapper.parse(XContentFactory.jsonBuilder().startObject()
+ .field("_id", "1").startObject("str_field").field("foo", "bar")
+                    .endObject().endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse(XContentFactory.jsonBuilder().startObject()
+ .field("_id", "1").startObject("int_field").field("foo", "bar")
+                    .endObject().endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse(XContentFactory.jsonBuilder().startObject()
+ .field("_id", "1").startObject("byte_field").field("foo", "bar")
+                    .endObject().endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse(XContentFactory.jsonBuilder().startObject()
+ .field("_id", "1").startObject("date_field").field("foo", "bar")
+                    .endObject().endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse(XContentFactory.jsonBuilder().startObject()
+ .field("_id", "1").startObject("double_field").field("foo", "bar")
+                    .endObject().endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse(XContentFactory.jsonBuilder().startObject()
+ .field("_id", "1").startObject("float_field").field("foo", "bar")
+                    .endObject().endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse(XContentFactory.jsonBuilder().startObject()
+ .field("_id", "1").startObject("long_field").field("foo", "bar")
+                    .endObject().endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse(XContentFactory.jsonBuilder().startObject()
+ .field("_id", "1").startObject("short_field").field("foo", "bar")
+                    .endObject().endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ }
+
+}
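The eight try/catch blocks in testInvalidFieldLevelBoost differ only in the field name; an equivalent, more compact form would iterate over the names (a sketch reusing the test's docMapper and imports):

    String[] fields = { "str_field", "int_field", "byte_field", "date_field",
                        "double_field", "float_field", "long_field", "short_field" };
    for (String field : fields) {
        try {
            docMapper.parse(XContentFactory.jsonBuilder().startObject()
                    .field("_id", "1").startObject(field).field("foo", "bar")
                    .endObject().endObject().bytes()).rootDoc();
            fail("expected MapperParsingException for " + field);
        } catch (MapperParsingException ex) {
            // expected: "foo" is not a legal key inside a compound field value
        }
    }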
diff --git a/src/test/java/org/elasticsearch/index/mapper/camelcase/CamelCaseFieldNameTests.java b/src/test/java/org/elasticsearch/index/mapper/camelcase/CamelCaseFieldNameTests.java
new file mode 100644
index 0000000..c97a396
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/camelcase/CamelCaseFieldNameTests.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.camelcase;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests that camelCase field names are kept as-is and not rewritten to snake_case.
+ */
+public class CamelCaseFieldNameTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCamelCaseFieldNameStaysAsIs() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("thisIsCamelCase", "value1")
+ .endObject().bytes());
+
+ assertThat(documentMapper.mappers().indexName("thisIsCamelCase").isEmpty(), equalTo(false));
+ assertThat(documentMapper.mappers().indexName("this_is_camel_case"), nullValue());
+
+ documentMapper.refreshSource();
+ documentMapper = MapperTestUtils.newParser().parse(documentMapper.mappingSource().string());
+
+ assertThat(documentMapper.mappers().indexName("thisIsCamelCase").isEmpty(), equalTo(false));
+ assertThat(documentMapper.mappers().indexName("this_is_camel_case"), nullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/completion/CompletionFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/completion/CompletionFieldMapperTests.java
new file mode 100644
index 0000000..683b8d4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/completion/CompletionFieldMapperTests.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.completion;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+
+public class CompletionFieldMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDefaultConfiguration() throws IOException {
+ String mapping = jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("completion")
+ .field("type", "completion")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ FieldMapper fieldMapper = defaultMapper.mappers().name("completion").mapper();
+ assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
+
+ CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
+ assertThat(completionFieldMapper.isStoringPayloads(), is(false));
+ }
+
+ @Test
+ public void testThatSerializationIncludesAllElements() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("completion")
+ .field("type", "completion")
+ .field("index_analyzer", "simple")
+ .field("search_analyzer", "standard")
+ .field("payloads", true)
+ .field("preserve_separators", false)
+ .field("preserve_position_increments", true)
+ .field("max_input_length", 14)
+
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ FieldMapper fieldMapper = defaultMapper.mappers().name("completion").mapper();
+ assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
+
+ CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
+ XContentBuilder builder = jsonBuilder().startObject();
+ completionFieldMapper.toXContent(builder, null).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ Map<String, Object> configMap = (Map<String, Object>) serializedMap.get("completion");
+ assertThat(configMap.get("index_analyzer").toString(), is("simple"));
+ assertThat(configMap.get("search_analyzer").toString(), is("standard"));
+ assertThat(Boolean.valueOf(configMap.get("payloads").toString()), is(true));
+ assertThat(Boolean.valueOf(configMap.get("preserve_separators").toString()), is(false));
+ assertThat(Boolean.valueOf(configMap.get("preserve_position_increments").toString()), is(true));
+ assertThat(Integer.valueOf(configMap.get("max_input_length").toString()), is(14));
+ }
+
+ @Test
+ public void testThatSerializationCombinesToOneAnalyzerFieldIfBothAreEqual() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("completion")
+ .field("type", "completion")
+ .field("index_analyzer", "simple")
+ .field("search_analyzer", "simple")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ FieldMapper fieldMapper = defaultMapper.mappers().name("completion").mapper();
+ assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
+
+ CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
+ XContentBuilder builder = jsonBuilder().startObject();
+ completionFieldMapper.toXContent(builder, null).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ Map<String, Object> configMap = (Map<String, Object>) serializedMap.get("completion");
+ assertThat(configMap.get("analyzer").toString(), is("simple"));
+ }
+
+}
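testThatSerializationCombinesToOneAnalyzerFieldIfBothAreEqual pins down a small serialization rule: when the index and search analyzers match, the mapper writes a single analyzer key instead of two. A sketch of that collapse over a plain map (illustrative, not the mapper's actual code):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class AnalyzerSerializationSketch {
        static Map<String, Object> analyzerEntries(String indexAnalyzer, String searchAnalyzer) {
            Map<String, Object> out = new LinkedHashMap<String, Object>();
            if (indexAnalyzer.equals(searchAnalyzer)) {
                out.put("analyzer", indexAnalyzer);        // collapsed form
            } else {
                out.put("index_analyzer", indexAnalyzer);  // split form
                out.put("search_analyzer", searchAnalyzer);
            }
            return out;
        }

        public static void main(String[] args) {
            System.out.println(analyzerEntries("simple", "simple"));   // {analyzer=simple}
            System.out.println(analyzerEntries("simple", "standard")); // {index_analyzer=simple, search_analyzer=standard}
        }
    }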
diff --git a/src/test/java/org/elasticsearch/index/mapper/compound/CompoundTypesTests.java b/src/test/java/org/elasticsearch/index/mapper/compound/CompoundTypesTests.java
new file mode 100644
index 0000000..c1825f3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/compound/CompoundTypesTests.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.compound;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+
+public class CompoundTypesTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testStringType() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat((double) doc.rootDoc().getField("field1").boost(), closeTo(1.0d, 0.000001d));
+ assertThat(doc.rootDoc().get("field2"), equalTo("value2"));
+
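+ // supplying the field as an object with "value" and "boost" sets an index-time boost for this occurrence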
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("field1").field("value", "value1").field("boost", 2.0f).endObject()
+ .field("field2", "value2")
+ .bytes());
+
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat((double) doc.rootDoc().getField("field1").boost(), closeTo(2.0d, 0.000001d));
+ assertThat(doc.rootDoc().get("field2"), equalTo("value2"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat((double) doc.rootDoc().getField("field1").boost(), closeTo(1.0d, 0.000001d));
+ assertThat(doc.rootDoc().get("field2"), equalTo("value2"));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationTests.java b/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationTests.java
new file mode 100644
index 0000000..40fccf0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.copyto;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class CopyToMapperIntegrationTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void testDynamicTemplateCopyTo() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("test-idx")
+ .addMapping("doc", createDynamicTemplateMapping())
+ );
+
+ int recordCount = between(1, 200);
+
+ for (int i = 0; i < recordCount * 2; i++) {
+ client().prepareIndex("test-idx", "doc", Integer.toString(i))
+ .setSource("test_field", "test " + i, "even", i % 2 == 0)
+ .get();
+ }
+ client().admin().indices().prepareRefresh("test-idx").execute().actionGet();
+
+ SearchResponse response = client().prepareSearch("test-idx")
+ .setQuery(QueryBuilders.termQuery("even", true))
+ .addAggregation(AggregationBuilders.terms("test").field("test_field").size(recordCount * 2))
+ .addAggregation(AggregationBuilders.terms("test_raw").field("test_field_raw").size(recordCount * 2))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo((long) recordCount));
+
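+ // "test_field" is analyzed, so every "test <i>" value yields the shared token "test" plus one unique numeric token,
+ // giving recordCount + 1 buckets; "test_field_raw" is not_analyzed (filled via copy_to), so each matching doc adds exactly one distinct term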
+ assertThat(((Terms) response.getAggregations().get("test")).getBuckets().size(), equalTo(recordCount + 1));
+ assertThat(((Terms) response.getAggregations().get("test_raw")).getBuckets().size(), equalTo(recordCount));
+
+ }
+
+
+ private XContentBuilder createDynamicTemplateMapping() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .startArray("dynamic_templates")
+
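+ // dynamic templates are evaluated in order, so the specific *_raw template must precede the catch-all template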
+ .startObject().startObject("template_raw")
+ .field("match", "*_raw")
+ .field("match_mapping_type", "string")
+ .startObject("mapping").field("type", "string").field("index", "not_analyzed").endObject()
+ .endObject().endObject()
+
+ .startObject().startObject("template_all")
+ .field("match", "*")
+ .field("match_mapping_type", "string")
+ .startObject("mapping").field("type", "string").field("copy_to", "{name}_raw").endObject()
+ .endObject().endObject()
+
+ .endArray();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java
new file mode 100644
index 0000000..35fca85
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.copyto;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class CopyToMapperTests extends ElasticsearchTestCase {
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testCopyToFieldsParsing() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("copy_test")
+ .field("type", "string")
+ .array("copy_to", "another_field", "cyclic_test")
+ .endObject()
+
+ .startObject("another_field")
+ .field("type", "string")
+ .endObject()
+
+ .startObject("cyclic_test")
+ .field("type", "string")
+ .array("copy_to", "copy_test")
+ .endObject()
+
+ .startObject("int_to_str_test")
+ .field("type", "integer")
+ .array("copy_to", "another_field", "new_field")
+ .endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = docMapper.mappers().name("copy_test").mapper();
+ assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
+
+ // Check json serialization
+ StringFieldMapper stringFieldMapper = (StringFieldMapper) fieldMapper;
+ XContentBuilder builder = jsonBuilder().startObject();
+ stringFieldMapper.toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ Map<String, Object> copyTestMap = (Map<String, Object>) serializedMap.get("copy_test");
+ assertThat(copyTestMap.get("type").toString(), is("string"));
+ List<String> copyToList = (List<String>) copyTestMap.get("copy_to");
+ assertThat(copyToList.size(), equalTo(2));
+ assertThat(copyToList.get(0).toString(), equalTo("another_field"));
+ assertThat(copyToList.get(1).toString(), equalTo("cyclic_test"));
+
+ // Check data parsing
+ BytesReference json = jsonBuilder().startObject()
+ .field("copy_test", "foo")
+ .field("cyclic_test", "bar")
+ .field("int_to_str_test", 42)
+ .endObject().bytes();
+
+ ParseContext.Document doc = docMapper.parse("type1", "1", json).rootDoc();
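+ // copy_to is applied once per parsed value (it does not recurse), so the cyclic pair simply exchanges values:
+ // both copy_test and cyclic_test end up holding "foo" and "bar"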
+ assertThat(doc.getFields("copy_test").length, equalTo(2));
+ assertThat(doc.getFields("copy_test")[0].stringValue(), equalTo("foo"));
+ assertThat(doc.getFields("copy_test")[1].stringValue(), equalTo("bar"));
+
+ assertThat(doc.getFields("another_field").length, equalTo(2));
+ assertThat(doc.getFields("another_field")[0].stringValue(), equalTo("foo"));
+ assertThat(doc.getFields("another_field")[1].stringValue(), equalTo("42"));
+
+ assertThat(doc.getFields("cyclic_test").length, equalTo(2));
+ assertThat(doc.getFields("cyclic_test")[0].stringValue(), equalTo("foo"));
+ assertThat(doc.getFields("cyclic_test")[1].stringValue(), equalTo("bar"));
+
+ assertThat(doc.getFields("int_to_str_test").length, equalTo(1));
+ assertThat(doc.getFields("int_to_str_test")[0].numericValue().intValue(), equalTo(42));
+
+ assertThat(doc.getFields("new_field").length, equalTo(1));
+ assertThat(doc.getFields("new_field")[0].numericValue().intValue(), equalTo(42));
+
+ fieldMapper = docMapper.mappers().name("new_field").mapper();
+ assertThat(fieldMapper, instanceOf(LongFieldMapper.class));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testCopyToFieldsInnerObjectParsing() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+
+ .startObject("copy_test")
+ .field("type", "string")
+ .field("copy_to", "very.inner.field")
+ .endObject()
+
+ .startObject("very")
+ .field("type", "object")
+ .startObject("properties")
+ .startObject("inner")
+ .field("type", "object")
+ .endObject()
+ .endObject()
+ .endObject()
+
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ BytesReference json = jsonBuilder().startObject()
+ .field("copy_test", "foo")
+ .startObject("foo").startObject("bar").field("baz", "zoo").endObject().endObject()
+ .endObject().bytes();
+
+ ParseContext.Document doc = docMapper.parse("type1", "1", json).rootDoc();
+ assertThat(doc.getFields("copy_test").length, equalTo(1));
+ assertThat(doc.getFields("copy_test")[0].stringValue(), equalTo("foo"));
+
+ assertThat(doc.getFields("very.inner.field").length, equalTo(1));
+ assertThat(doc.getFields("very.inner.field")[0].stringValue(), equalTo("foo"));
+
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testCopyToFieldsNonExistingInnerObjectParsing() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+
+ .startObject("copy_test")
+ .field("type", "string")
+ .field("copy_to", "very.inner.field")
+ .endObject()
+
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ BytesReference json = jsonBuilder().startObject()
+ .field("copy_test", "foo")
+ .endObject().bytes();
+
+ try {
+ docMapper.parse("type1", "1", json).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ assertThat(ex.getMessage(), startsWith("attempt to copy value to non-existing object"));
+ }
+ }
+
+ @Test
+ public void testCopyToFieldMerge() throws Exception {
+
+ String mappingBefore = jsonBuilder().startObject().startObject("type1").startObject("properties")
+
+ .startObject("copy_test")
+ .field("type", "string")
+ .array("copy_to", "foo", "bar")
+ .endObject()
+
+ .endObject().endObject().endObject().string();
+
+ String mappingAfter = jsonBuilder().startObject().startObject("type1").startObject("properties")
+
+ .startObject("copy_test")
+ .field("type", "string")
+ .array("copy_to", "baz", "bar")
+ .endObject()
+
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapperBefore = MapperTestUtils.newParser().parse(mappingBefore);
+
+ ImmutableList<String> fields = docMapperBefore.mappers().name("copy_test").mapper().copyTo().copyToFields();
+
+ assertThat(fields.size(), equalTo(2));
+ assertThat(fields.get(0), equalTo("foo"));
+ assertThat(fields.get(1), equalTo("bar"));
+
+
+ DocumentMapper docMapperAfter = MapperTestUtils.newParser().parse(mappingAfter);
+
+ DocumentMapper.MergeResult mergeResult = docMapperBefore.merge(docMapperAfter, mergeFlags().simulate(true));
+
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapperBefore.merge(docMapperAfter, mergeFlags().simulate(false));
+
+ fields = docMapperBefore.mappers().name("copy_test").mapper().copyTo().copyToFields();
+
+ assertThat(fields.size(), equalTo(2));
+ assertThat(fields.get(0), equalTo("baz"));
+ assertThat(fields.get(1), equalTo("bar"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java
new file mode 100644
index 0000000..bbfd82d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.facet.terms.TermsFacetBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrationTest {
+ @ParametersFactory
+ public static Iterable<Object[]> buildParameters() {
+ List<Object[]> parameters = new ArrayList<Object[]>();
+ for (boolean storeCountedFields : new boolean[] { true, false }) {
+ for (boolean loadCountedFields : new boolean[] { true, false }) {
+ parameters.add(new Object[] { storeCountedFields, loadCountedFields });
+ }
+ }
+ return parameters;
+ }
+
+ private final boolean storeCountedFields;
+ private final boolean loadCountedFields;
+
+ public TokenCountFieldMapperIntegrationTests(@Name("storeCountedFields") boolean storeCountedFields,
+ @Name("loadCountedFields") boolean loadCountedFields) {
+ this.storeCountedFields = storeCountedFields;
+ this.loadCountedFields = loadCountedFields;
+ }
+
+ /**
+ * It is possible to get the token count in a search response.
+ */
+ @Test
+ public void searchReturnsTokenCount() throws ElasticsearchException, IOException {
+ init();
+
+ assertSearchReturns(searchById("single"), "single");
+ assertSearchReturns(searchById("bulk1"), "bulk1");
+ assertSearchReturns(searchById("bulk2"), "bulk2");
+ assertSearchReturns(searchById("multi"), "multi");
+ assertSearchReturns(searchById("multibulk1"), "multibulk1");
+ assertSearchReturns(searchById("multibulk2"), "multibulk2");
+ }
+
+ /**
+ * It is possible to search by token count.
+ */
+ @Test
+ public void searchByTokenCount() throws ElasticsearchException, IOException {
+ init();
+
+ assertSearchReturns(searchByNumericRange(4, 4).get(), "single");
+ assertSearchReturns(searchByNumericRange(10, 10).get(), "multibulk2");
+ assertSearchReturns(searchByNumericRange(7, 10).get(), "multi", "multibulk1", "multibulk2");
+ assertSearchReturns(searchByNumericRange(1, 10).get(), "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2");
+ assertSearchReturns(searchByNumericRange(12, 12).get());
+ }
+
+ /**
+ * It is possible to facet on the token count.
+ */
+ @Test
+ public void facetByTokenCount() throws ElasticsearchException, IOException {
+ init();
+
+ String facetField = randomFrom(ImmutableList.of(
+ "foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values"));
+ SearchResponse result = searchByNumericRange(1, 10)
+ .addFacet(new TermsFacetBuilder("facet").field(facetField)).get();
+ assertSearchReturns(result, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2");
+ assertThat(result.getFacets().facets().size(), equalTo(1));
+ TermsFacet facet = (TermsFacet) result.getFacets().facets().get(0);
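+ // the six docs produce the distinct token counts {1, 2, 3, 4, 5, 6, 7, 8, 10}, i.e. nine facet entries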
+ assertThat(facet.getEntries().size(), equalTo(9));
+ }
+
+ private void init() throws ElasticsearchException, IOException {
+ prepareCreate("test").addMapping("test", jsonBuilder().startObject()
+ .startObject("test")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("store", storeCountedFields)
+ .field("analyzer", "simple")
+ .endObject()
+ .startObject("token_count")
+ .field("type", "token_count")
+ .field("analyzer", "standard")
+ .field("store", true)
+ .endObject()
+ .startObject("token_count_unstored")
+ .field("type", "token_count")
+ .field("analyzer", "standard")
+ .endObject()
+ .startObject("token_count_with_doc_values")
+ .field("type", "token_count")
+ .field("analyzer", "standard")
+ .startObject("fielddata")
+ .field("format", LuceneTestCase.defaultCodecSupportsSortedSet() ? "doc_values" : null)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()).get();
+ ensureGreen();
+
+ assertTrue(prepareIndex("single", "I have four terms").get().isCreated());
+ BulkResponse bulk = client().prepareBulk()
+ .add(prepareIndex("bulk1", "bulk three terms"))
+ .add(prepareIndex("bulk2", "this has five bulk terms")).get();
+ assertFalse(bulk.buildFailureMessage(), bulk.hasFailures());
+ assertTrue(prepareIndex("multi", "two terms", "wow now I have seven lucky terms").get().isCreated());
+ bulk = client().prepareBulk()
+ .add(prepareIndex("multibulk1", "one", "oh wow now I have eight unlucky terms"))
+ .add(prepareIndex("multibulk2", "six is a bunch of terms", "ten! ten terms is just crazy! too many too count!")).get();
+ assertFalse(bulk.buildFailureMessage(), bulk.hasFailures());
+
+ assertThat(refresh().getFailedShards(), equalTo(0));
+ }
+
+ private IndexRequestBuilder prepareIndex(String id, String... texts) throws IOException {
+ return client().prepareIndex("test", "test", id).setSource("foo", texts);
+ }
+
+ private SearchResponse searchById(String id) {
+ return prepareSearch().setQuery(QueryBuilders.termQuery("_id", id)).get();
+ }
+
+ private SearchRequestBuilder searchByNumericRange(int low, int high) {
+ return prepareSearch().setQuery(QueryBuilders.rangeQuery(randomFrom(
+ ImmutableList.of("foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values")
+ )).gte(low).lte(high));
+ }
+
+ private SearchRequestBuilder prepareSearch() {
+ SearchRequestBuilder request = client().prepareSearch("test").setTypes("test");
+ request.addField("foo.token_count");
+ if (loadCountedFields) {
+ request.addField("foo");
+ }
+ return request;
+ }
+
+ private void assertSearchReturns(SearchResponse result, String... ids) {
+ assertThat(result.getHits().getTotalHits(), equalTo((long) ids.length));
+ assertThat(result.getHits().hits().length, equalTo(ids.length));
+ List<String> foundIds = new ArrayList<String>();
+ for (SearchHit hit : result.getHits()) {
+ foundIds.add(hit.id());
+ }
+ assertThat(foundIds, containsInAnyOrder(ids));
+ for (SearchHit hit : result.getHits()) {
+ String id = hit.id();
+ if (id.equals("single")) {
+ assertSearchHit(hit, 4);
+ } else if (id.equals("bulk1")) {
+ assertSearchHit(hit, 3);
+ } else if (id.equals("bulk2")) {
+ assertSearchHit(hit, 5);
+ } else if (id.equals("multi")) {
+ assertSearchHit(hit, 2, 7);
+ } else if (id.equals("multibulk1")) {
+ assertSearchHit(hit, 1, 8);
+ } else if (id.equals("multibulk2")) {
+ assertSearchHit(hit, 6, 10);
+ } else {
+ throw new ElasticsearchException("Unexpected response!");
+ }
+ }
+ }
+
+ private void assertSearchHit(SearchHit hit, int... termCounts) {
+ assertThat(hit.field("foo.token_count"), not(nullValue()));
+ assertThat(hit.field("foo.token_count").values().size(), equalTo(termCounts.length));
+ for (int i = 0; i < termCounts.length; i++) {
+ assertThat((Integer) hit.field("foo.token_count").values().get(i), equalTo(termCounts[i]));
+ }
+
+ if (loadCountedFields && storeCountedFields) {
+ assertThat(hit.field("foo").values().size(), equalTo(termCounts.length));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java
new file mode 100644
index 0000000..284a33a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenStream;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+
+import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Test for {@link TokenCountFieldMapper}.
+ */
+public class TokenCountFieldMapperTests extends ElasticsearchTestCase {
+ @Test
+ public void testMerge() throws IOException {
+ String stage1Mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("person")
+ .startObject("properties")
+ .startObject("tc")
+ .field("type", "token_count")
+ .field("analyzer", "keyword")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper stage1 = MapperTestUtils.newParser().parse(stage1Mapping);
+
+ String stage2Mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("person")
+ .startObject("properties")
+ .startObject("tc")
+ .field("type", "token_count")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper stage2 = MapperTestUtils.newParser().parse(stage2Mapping);
+
+ DocumentMapper.MergeResult mergeResult = stage1.merge(stage2, mergeFlags().simulate(true));
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ // Just simulated so merge hasn't happened yet
+ assertThat(((TokenCountFieldMapper) stage1.mappers().smartName("tc").mapper()).analyzer(), equalTo("keyword"));
+
+ mergeResult = stage1.merge(stage2, mergeFlags().simulate(false));
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ // Not simulated this time, so the merge has been applied and the analyzer updated
+ assertThat(((TokenCountFieldMapper) stage1.mappers().smartName("tc").mapper()).analyzer(), equalTo("standard"));
+ }
+
+ @Test
+ public void testCountPositions() throws IOException {
+ // We're looking to make sure that we:
+ Token t1 = new Token(); // Don't count tokens without an increment
+ t1.setPositionIncrement(0);
+ Token t2 = new Token();
+ t2.setPositionIncrement(1); // Count normal tokens with one increment
+ Token t3 = new Token();
+ t3.setPositionIncrement(2); // Count funny tokens with more than one increment
+ int finalTokenIncrement = 4; // Count the final token increment on the rare token streams that have them
+ Token[] tokens = new Token[] {t1, t2, t3};
+ Collections.shuffle(Arrays.asList(tokens), getRandom());
+ TokenStream tokenStream = new CannedTokenStream(finalTokenIncrement, 0, tokens);
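+ // expected count is the sum of the increments: 0 (t1) + 1 (t2) + 2 (t3) + 4 (final increment) = 7, independent of shuffle order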
+ assertThat(TokenCountFieldMapper.countPositions(tokenStream), equalTo(7));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java
new file mode 100644
index 0000000..de72c19
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java
@@ -0,0 +1,360 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.date;
+
+import org.apache.lucene.analysis.NumericTokenStream.NumericTermAttribute;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.NumericRangeFilter;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleDateMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testAutomaticDateParser() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("date_field1", "2011/01/22")
+ .field("date_field2", "2011/01/22 00:00:00")
+ .field("wrong_date1", "-4")
+ .field("wrong_date2", "2012/2")
+ .field("wrong_date3", "2012/test")
+ .endObject()
+ .bytes());
+
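+ // automatic date detection only recognizes the default dynamic date formats (dateOptionalTime and yyyy/MM/dd-style patterns),
+ // so the wrong_date* values below fall back to plain string fields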
+ FieldMapper<?> fieldMapper = defaultMapper.mappers().smartNameFieldMapper("date_field1");
+ assertThat(fieldMapper, instanceOf(DateFieldMapper.class));
+ fieldMapper = defaultMapper.mappers().smartNameFieldMapper("date_field2");
+ assertThat(fieldMapper, instanceOf(DateFieldMapper.class));
+
+ fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date1");
+ assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
+ fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date2");
+ assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
+ fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date3");
+ assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
+ }
+
+ @Test
+ public void testParseLocale() {
+ assertThat(Locale.GERMAN, equalTo(DateFieldMapper.parseLocale("de")));
+ assertThat(Locale.GERMANY, equalTo(DateFieldMapper.parseLocale("de_DE")));
+ assertThat(new Locale("de","DE","DE"), equalTo(DateFieldMapper.parseLocale("de_DE_DE")));
+
+ try {
+ DateFieldMapper.parseLocale("de_DE_DE_DE");
+ fail();
+ } catch(ElasticsearchIllegalArgumentException ex) {
+ // expected
+ }
+ assertThat(Locale.ROOT, equalTo(DateFieldMapper.parseLocale("")));
+ assertThat(Locale.ROOT, equalTo(DateFieldMapper.parseLocale("ROOT")));
+ }
+
+ @Test
+ public void testLocale() throws IOException {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("date_field_default")
+ .field("type", "date")
+ .field("format", "E, d MMM yyyy HH:mm:ss Z")
+ .endObject()
+ .startObject("date_field_en")
+ .field("type", "date")
+ .field("format", "E, d MMM yyyy HH:mm:ss Z")
+ .field("locale", "EN")
+ .endObject()
+ .startObject("date_field_de")
+ .field("type", "date")
+ .field("format", "E, d MMM yyyy HH:mm:ss Z")
+ .field("locale", "DE_de")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("date_field_en", "Wed, 06 Dec 2000 02:55:00 -0800")
+ .field("date_field_de", "Mi, 06 Dez 2000 02:55:00 -0800")
+ .field("date_field_default", "Wed, 06 Dec 2000 02:55:00 -0800") // check default - no exception is a successs!
+ .endObject()
+ .bytes());
+ assertNumericTokensEqual(doc, defaultMapper, "date_field_en", "date_field_de");
+ assertNumericTokensEqual(doc, defaultMapper, "date_field_en", "date_field_default");
+ }
+
+ private DocumentMapper mapper(String mapping) throws IOException {
+ // we serialize and deserialize the mapping to make sure serialization works just fine
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject();
+ defaultMapper.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ String rebuildMapping = builder.string();
+ return MapperTestUtils.newParser().parse(rebuildMapping);
+ }
+
+ private void assertNumericTokensEqual(ParsedDocument doc, DocumentMapper defaultMapper, String fieldA, String fieldB) throws IOException {
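+ // both fields should emit identical numeric (trie) tokens, i.e. both dates parsed to the same instant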
+ assertThat(doc.rootDoc().getField(fieldA).tokenStream(defaultMapper.indexAnalyzer()), notNullValue());
+ assertThat(doc.rootDoc().getField(fieldB).tokenStream(defaultMapper.indexAnalyzer()), notNullValue());
+
+ TokenStream tokenStream = doc.rootDoc().getField(fieldA).tokenStream(defaultMapper.indexAnalyzer());
+ tokenStream.reset();
+ NumericTermAttribute nta = tokenStream.addAttribute(NumericTermAttribute.class);
+ List<Long> values = new ArrayList<Long>();
+ while(tokenStream.incrementToken()) {
+ values.add(nta.getRawValue());
+ }
+
+ tokenStream = doc.rootDoc().getField(fieldB).tokenStream(defaultMapper.indexAnalyzer());
+ tokenStream.reset();
+ nta = tokenStream.addAttribute(NumericTermAttribute.class);
+ int pos = 0;
+ while(tokenStream.incrementToken()) {
+ assertThat(values.get(pos++), equalTo(nta.getRawValue()));
+ }
+ assertThat(pos, equalTo(values.size()));
+ }
+
+ @Test
+ public void testTimestampAsDate() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("date_field").field("type", "date").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+
+ long value = System.currentTimeMillis();
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("date_field", value)
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("date_field").tokenStream(defaultMapper.indexAnalyzer()), notNullValue());
+ }
+
+ @Test
+ public void testDateDetection() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("date_detection", false)
+ .startObject("properties").startObject("date_field").field("type", "date").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("date_field", "2010-01-01")
+ .field("date_field_x", "2010-01-01")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("date_field"), nullValue());
+ assertThat(doc.rootDoc().get("date_field_x"), equalTo("2010-01-01"));
+ }
+
+ @Test
+ public void testHourFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("date_detection", false)
+ .startObject("properties").startObject("date_field").field("type", "date").field("format", "HH:mm:ss").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("date_field", "10:00:00")
+ .endObject()
+ .bytes());
+ assertThat(((LongFieldMapper.CustomLongNumericField) doc.rootDoc().getField("date_field")).numericAsString(), equalTo(Long.toString(new DateTime(TimeValue.timeValueHours(10).millis(), DateTimeZone.UTC).getMillis())));
+
+ Filter filter = defaultMapper.mappers().smartNameFieldMapper("date_field").rangeFilter("10:00:00", "11:00:00", true, true, null);
+ assertThat(filter, instanceOf(NumericRangeFilter.class));
+ NumericRangeFilter<Long> rangeFilter = (NumericRangeFilter<Long>) filter;
+ assertThat(rangeFilter.getMax(), equalTo(new DateTime(TimeValue.timeValueHours(11).millis() + 999).getMillis())); // +999 ms: the inclusive upper bound is rounded up to cover the whole 11:00:00 second
+ assertThat(rangeFilter.getMin(), equalTo(new DateTime(TimeValue.timeValueHours(10).millis()).getMillis()));
+ }
+
+
+ @Test
+ public void testDayWithoutYearFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("date_detection", false)
+ .startObject("properties").startObject("date_field").field("type", "date").field("format", "MMM dd HH:mm:ss").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("date_field", "Jan 02 10:00:00")
+ .endObject()
+ .bytes());
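+ // the format carries no year, so Joda defaults to 1970-01-01; "Jan 02 10:00:00" is 34 hours after the epoch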
+ assertThat(((LongFieldMapper.CustomLongNumericField) doc.rootDoc().getField("date_field")).numericAsString(), equalTo(Long.toString(new DateTime(TimeValue.timeValueHours(34).millis(), DateTimeZone.UTC).getMillis())));
+
+ Filter filter = defaultMapper.mappers().smartNameFieldMapper("date_field").rangeFilter("Jan 02 10:00:00", "Jan 02 11:00:00", true, true, null);
+ assertThat(filter, instanceOf(NumericRangeFilter.class));
+ NumericRangeFilter<Long> rangeFilter = (NumericRangeFilter<Long>) filter;
+ assertThat(rangeFilter.getMax(), equalTo(new DateTime(TimeValue.timeValueHours(35).millis() + 999).getMillis())); // +999 ms: the inclusive upper bound is rounded up to cover the whole 11:00:00 second
+ assertThat(rangeFilter.getMin(), equalTo(new DateTime(TimeValue.timeValueHours(34).millis()).getMillis()));
+ }
+
+ @Test
+ public void testIgnoreMalformedOption() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "date").field("ignore_malformed", true).endObject()
+ .startObject("field2").field("type", "date").field("ignore_malformed", false).endObject()
+ .startObject("field3").field("type", "date").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "a")
+ .field("field2", "2010-01-01")
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("field1"), nullValue());
+ assertThat(doc.rootDoc().getField("field2"), notNullValue());
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field2", "a")
+ .endObject()
+ .bytes());
+ fail();
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(MapperParsingException.class));
+ }
+
+ // Verify that the default is false
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field3", "a")
+ .endObject()
+ .bytes());
+ fail();
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(MapperParsingException.class));
+ }
+
+ // Unless the global ignore_malformed option is set to true
+ Settings indexSettings = settingsBuilder().put("index.mapping.ignore_malformed", true).build();
+ defaultMapper = MapperTestUtils.newParser(indexSettings).parse(mapping);
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field3", "a")
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("field3"), nullValue());
+
+ // This should still throw an exception, since field2 is specifically set to ignore_malformed=false
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field2", "a")
+ .endObject()
+ .bytes());
+ fail();
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(MapperParsingException.class));
+ }
+ }
+
+ @Test
+ public void testThatMergingWorks() throws Exception {
+ String initialMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field").field("type", "date")
+ .field("format", "EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "date")
+ .field("format", "EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy||yyyy-MM-dd'T'HH:mm:ss.SSSZZ")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = mapper(initialMapping);
+ DocumentMapper mergeMapper = mapper(updatedMapping);
+
+ assertThat(defaultMapper.mappers().name("field").mapper(), is(instanceOf(DateFieldMapper.class)));
+ DateFieldMapper initialDateFieldMapper = (DateFieldMapper) defaultMapper.mappers().name("field").mapper();
+ Map<String, String> config = getConfigurationViaXContent(initialDateFieldMapper);
+ assertThat(config.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy"));
+
+ DocumentMapper.MergeResult mergeResult = defaultMapper.merge(mergeMapper, DocumentMapper.MergeFlags.mergeFlags().simulate(false));
+
+ assertThat("Merging resulting in conflicts: " + Arrays.asList(mergeResult.conflicts()), mergeResult.hasConflicts(), is(false));
+ assertThat(defaultMapper.mappers().name("field").mapper(), is(instanceOf(DateFieldMapper.class)));
+
+ DateFieldMapper mergedFieldMapper = (DateFieldMapper) defaultMapper.mappers().name("field").mapper();
+ Map<String, String> mergedConfig = getConfigurationViaXContent(mergedFieldMapper);
+ assertThat(mergedConfig.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy||yyyy-MM-dd'T'HH:mm:ss.SSSZZ"));
+ }
+
+ private Map<String, String> getConfigurationViaXContent(DateFieldMapper dateFieldMapper) throws IOException {
+ XContentBuilder builder = JsonXContent.contentBuilder().startObject();
+ dateFieldMapper.toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
+ Map<String, Object> dateFieldMapperMap = JsonXContent.jsonXContent.createParser(builder.string()).mapAndClose();
+ assertThat(dateFieldMapperMap, hasKey("field"));
+ assertThat(dateFieldMapperMap.get("field"), is(instanceOf(Map.class)));
+ return (Map<String, String>) dateFieldMapperMap.get("field");
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java
new file mode 100644
index 0000000..17e30f0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.dynamic;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.StrictDynamicMappingException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class DynamicMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDynamicTrue() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "true")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("field2"), equalTo("value2"));
+ }
+
+ @Test
+ public void testDynamicFalse() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "false")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+
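+ // with dynamic set to false, the unmapped field2 is silently dropped instead of being added to the mapping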
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("field2"), nullValue());
+ }
+
+
+ @Test
+ public void testDynamicStrict() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "strict")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+ fail();
+ } catch (StrictDynamicMappingException e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testDynamicFalseWithInnerObjectButDynamicSetOnRoot() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "false")
+ .startObject("properties")
+ .startObject("obj1").startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject().endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("obj1")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("obj1.field1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("obj1.field2"), nullValue());
+ }
+
+ @Test
+ public void testDynamicStrictWithInnerObjectButDynamicSetOnRoot() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "strict")
+ .startObject("properties")
+ .startObject("obj1").startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject().endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("obj1")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject()
+ .bytes());
+ fail();
+ } catch (StrictDynamicMappingException e) {
+ // all is well
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/GenericStoreDynamicTemplateTests.java b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/GenericStoreDynamicTemplateTests.java
new file mode 100644
index 0000000..61f4e9f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/GenericStoreDynamicTemplateTests.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.dynamictemplate.genericstore;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class GenericStoreDynamicTemplateTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimple() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+
+ FieldMappers fieldMappers = docMapper.mappers().fullName("name");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ assertThat(fieldMappers.mapper().fieldType().stored(), equalTo(true));
+
+ f = doc.getField("age");
+ assertThat(f.name(), equalTo("age"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+
+ fieldMappers = docMapper.mappers().fullName("age");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ assertThat(fieldMappers.mapper().fieldType().stored(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json
new file mode 100644
index 0000000..75b8a0e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json
@@ -0,0 +1,5 @@
+{
+ "_id":"1",
+ "name":"some name",
+ "age":1
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json
new file mode 100644
index 0000000..d99067c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json
@@ -0,0 +1,14 @@
+{
+ "person":{
+ "dynamic_templates":[
+ {
+ "template_1":{
+ "match":"*",
+ "mapping":{
+ "store":"yes"
+ }
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/PathMatchDynamicTemplateTests.java b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/PathMatchDynamicTemplateTests.java
new file mode 100644
index 0000000..05546e1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/PathMatchDynamicTemplateTests.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.dynamictemplate.pathmatch;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class PathMatchDynamicTemplateTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimple() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("top_level"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+
+ FieldMappers fieldMappers = docMapper.mappers().fullName("name");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ assertThat(fieldMappers.mapper().fieldType().stored(), equalTo(false));
+
+ f = doc.getField("obj1.name");
+ assertThat(f.name(), equalTo("obj1.name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+
+ fieldMappers = docMapper.mappers().fullName("obj1.name");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ assertThat(fieldMappers.mapper().fieldType().stored(), equalTo(true));
+
+ f = doc.getField("obj1.obj2.name");
+ assertThat(f.name(), equalTo("obj1.obj2.name"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+
+ fieldMappers = docMapper.mappers().fullName("obj1.obj2.name");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ assertThat(fieldMappers.mapper().fieldType().stored(), equalTo(false));
+
+ // verify more complex path_match expressions
+
+ fieldMappers = docMapper.mappers().fullName("obj3.obj4.prop1");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json
new file mode 100644
index 0000000..818bedd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json
@@ -0,0 +1,15 @@
+{
+ "_id":"1",
+ "name":"top_level",
+ "obj1":{
+ "name":"obj1_level",
+ "obj2":{
+ "name":"obj2_level"
+ }
+ },
+ "obj3":{
+ "obj4":{
+ "prop1":"prop1_value"
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json
new file mode 100644
index 0000000..dce33da
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json
@@ -0,0 +1,30 @@
+{
+ "person":{
+ "dynamic_templates":[
+ {
+ "template_1":{
+ "path_match":"obj1.obj2.*",
+ "mapping":{
+ "store":"no"
+ }
+ }
+ },
+ {
+ "template_2":{
+ "path_match":"obj1.*",
+ "mapping":{
+ "store":"yes"
+ }
+ }
+ },
+ {
+ "template_3":{
+ "path_match":"*.obj4.*",
+ "mapping":{
+ "type":"string"
+ }
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/SimpleDynamicTemplatesTests.java b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/SimpleDynamicTemplatesTests.java
new file mode 100644
index 0000000..e11c703
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/SimpleDynamicTemplatesTests.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.dynamictemplate.simple;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleDynamicTemplatesTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testMatchTypeOnly() throws Exception {
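+ // the template matches string fields only, so "s" is mapped with index:no
+ // while the long field "l" keeps the default mapping and stays indexed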
+ XContentBuilder builder = JsonXContent.contentBuilder();
+ builder.startObject().startObject("person").startArray("dynamic_templates").startObject().startObject("test")
+ .field("match_mapping_type", "string")
+ .startObject("mapping").field("index", "no").endObject()
+ .endObject().endObject().endArray().endObject().endObject();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(builder.string());
+ builder = JsonXContent.contentBuilder();
+ builder.startObject().field("_id", "1").field("s", "hello").field("l", 1).endObject();
+ docMapper.parse(builder.bytes());
+
+ DocumentFieldMappers mappers = docMapper.mappers();
+
+ assertThat(mappers.smartName("s"), Matchers.notNullValue());
+ assertThat(mappers.smartName("s").mapper().fieldType().indexed(), equalTo(false));
+
+ assertThat(mappers.smartName("l"), Matchers.notNullValue());
+ assertThat(mappers.smartName("l").mapper().fieldType().indexed(), equalTo(true));
+ }
+
+ @Test
+ public void testSimple() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ FieldMappers fieldMappers = docMapper.mappers().fullName("name");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi1");
+ assertThat(f.name(), equalTo("multi1"));
+ assertThat(f.stringValue(), equalTo("multi 1"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+
+ fieldMappers = docMapper.mappers().fullName("multi1");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi1.org");
+ assertThat(f.name(), equalTo("multi1.org"));
+ assertThat(f.stringValue(), equalTo("multi 1"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ fieldMappers = docMapper.mappers().fullName("multi1.org");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi2");
+ assertThat(f.name(), equalTo("multi2"));
+ assertThat(f.stringValue(), equalTo("multi 2"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+
+ fieldMappers = docMapper.mappers().fullName("multi2");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi2.org");
+ assertThat(f.name(), equalTo("multi2.org"));
+ assertThat(f.stringValue(), equalTo("multi 2"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ fieldMappers = docMapper.mappers().fullName("multi2.org");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ }
+
+ @Test
+ public void testSimpleWithXContentTraverse() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ docMapper.refreshSource();
+ docMapper = MapperTestUtils.newParser().parse(docMapper.mappingSource().string());
+
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json");
+ Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ FieldMappers fieldMappers = docMapper.mappers().fullName("name");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi1");
+ assertThat(f.name(), equalTo("multi1"));
+ assertThat(f.stringValue(), equalTo("multi 1"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+
+ fieldMappers = docMapper.mappers().fullName("multi1");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi1.org");
+ assertThat(f.name(), equalTo("multi1.org"));
+ assertThat(f.stringValue(), equalTo("multi 1"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ fieldMappers = docMapper.mappers().fullName("multi1.org");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi2");
+ assertThat(f.name(), equalTo("multi2"));
+ assertThat(f.stringValue(), equalTo("multi 2"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+
+ fieldMappers = docMapper.mappers().fullName("multi2");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+
+ f = doc.getField("multi2.org");
+ assertThat(f.name(), equalTo("multi2.org"));
+ assertThat(f.stringValue(), equalTo("multi 2"));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ fieldMappers = docMapper.mappers().fullName("multi2.org");
+ assertThat(fieldMappers.mappers().size(), equalTo(1));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json
new file mode 100644
index 0000000..5682d1d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json
@@ -0,0 +1,7 @@
+{
+ "_id":"1",
+ "name":"some name",
+ "age":1,
+ "multi1":"multi 1",
+ "multi2":"multi 2"
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json
new file mode 100644
index 0000000..fa0293c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json
@@ -0,0 +1,36 @@
+{
+ "person":{
+ "dynamic_templates":[
+ {
+ "tempalte_1":{
+ "match":"multi*",
+ "mapping":{
+ "type":"multi_field",
+ "fields":{
+ "{name}":{
+ "type":"{dynamic_type}",
+ "index":"analyzed",
+ "store":"yes"
+ },
+ "org":{
+ "type":"{dynamic_type}",
+ "index":"not_analyzed",
+ "store":"yes"
+ }
+ }
+ }
+ }
+ },
+ {
+ "template_2":{
+ "match":"*",
+ "match_mapping_type":"string",
+ "mapping":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/GeoEncodingTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/GeoEncodingTests.java
new file mode 100644
index 0000000..f52363e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/geo/GeoEncodingTests.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.DistanceUnit.Distance;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Arrays;
+
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+public class GeoEncodingTests extends ElasticsearchTestCase {
+
+ public void test() {
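+ // round-trip random coordinates through the encoding and check that the
+ // decode error never exceeds the randomly chosen precision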
+ for (int i = 0; i < 10000; ++i) {
+ final double lat = randomDouble() * 180 - 90;
+ final double lon = randomDouble() * 360 - 180;
+ final Distance precision = new Distance(1+(randomDouble() * 9), randomFrom(Arrays.asList(DistanceUnit.MILLIMETERS, DistanceUnit.METERS, DistanceUnit.KILOMETERS)));
+ final GeoPointFieldMapper.Encoding encoding = GeoPointFieldMapper.Encoding.of(precision);
+ assertThat(encoding.precision().convert(DistanceUnit.METERS).value, lessThanOrEqualTo(precision.convert(DistanceUnit.METERS).value));
+ final GeoPoint geoPoint = encoding.decode(encoding.encodeCoordinate(lat), encoding.encodeCoordinate(lon), new GeoPoint());
+ final double error = GeoDistance.PLANE.calculate(lat, lon, geoPoint.lat(), geoPoint.lon(), DistanceUnit.METERS);
+ assertThat(error, lessThanOrEqualTo(precision.convert(DistanceUnit.METERS).value));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/GeoMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/GeoMappingTests.java
new file mode 100644
index 0000000..44c9575
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/geo/GeoMappingTests.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.DistanceUnit.Distance;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.Map;
+
+public class GeoMappingTests extends ElasticsearchIntegrationTest {
+
+ public void testUpdatePrecision() throws Exception {
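+ // the fielddata precision of a geo_point field is expected to be updatable
+ // through the put-mapping API without recreating the index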
+ prepareCreate("test").addMapping("type1", XContentFactory.jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("pin")
+ .field("type", "geo_point")
+ .startObject("fielddata")
+ .field("format", "compressed")
+ .field("precision", "2mm")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ ensureYellow();
+ assertPrecision(new Distance(2, DistanceUnit.MILLIMETERS));
+
+ client().admin().indices().preparePutMapping("test").setType("type1").setSource(XContentFactory.jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("pin")
+ .field("type", "geo_point")
+ .startObject("fielddata")
+ .field("format", "compressed")
+ .field("precision", "11m")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ assertPrecision(new Distance(11, DistanceUnit.METERS));
+ }
+
+ private void assertPrecision(Distance expected) throws Exception {
+ ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = client().admin().indices().getMappings(new GetMappingsRequest().indices("test").types("type1")).actionGet().getMappings();
+ assertNotNull(mappings);
+ Map<String, ?> properties = (Map<String, ?>) mappings.get("test").get("type1").getSourceAsMap().get("properties");
+ Map<String, ?> pinProperties = (Map<String, ?>) properties.get("pin");
+ Map<String, ?> pinFieldData = (Map<String, ?>) pinProperties.get("fielddata");
+ Distance precision = Distance.parseDistance(pinFieldData.get("precision").toString());
+ assertEquals(expected, precision);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java
new file mode 100644
index 0000000..e2a2be5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java
@@ -0,0 +1,249 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.geo;
+
+import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
+import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class GeoShapeFieldMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDefaultConfiguration() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(GeoShapeFieldMapper.Defaults.DISTANCE_ERROR_PCT));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoShapeFieldMapper.Defaults.GEOHASH_LEVELS));
+ }
+
+ @Test
+ public void testGeohashConfiguration() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("tree_levels", "4")
+ .field("distance_error_pct", "0.1")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.1));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(4));
+ }
+
+ @Test
+ public void testQuadtreeConfiguration() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .field("tree_levels", "6")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(QuadPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(6));
+ }
+
+ @Test
+ public void testLevelPrecisionConfiguration() throws IOException {
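+ // when both tree_levels and precision are configured, the finer-grained of
+ // the two is expected to determine the number of prefix-tree levels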
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .field("tree_levels", "6")
+ .field("precision", "70m")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(QuadPrefixTree.class));
+ /* 70m is more precise so it wins */
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.quadTreeLevelsForPrecision(70d)));
+ }
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("tree_levels", "6")
+ .field("precision", "70m")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ /* 70m is more precise so it wins */
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.geoHashLevelsForPrecision(70d)));
+ }
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("tree_levels", GeoUtils.geoHashLevelsForPrecision(70d)+1)
+ .field("precision", "70m")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.geoHashLevelsForPrecision(70d)+1));
+ }
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .field("tree_levels", GeoUtils.quadTreeLevelsForPrecision(70d)+1)
+ .field("precision", "70m")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(QuadPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.quadTreeLevelsForPrecision(70d)+1));
+ }
+ }
+
+ @Test
+ public void testLevelDefaults() throws IOException {
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(QuadPrefixTree.class));
+ /* 50m is default */
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.quadTreeLevelsForPrecision(50d)));
+ }
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().name("location").mapper();
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ /* 50m is default */
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.geoHashLevelsForPrecision(50d)));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java
new file mode 100644
index 0000000..ffa8900
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class GeohashMappingGeoPointTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lat"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lon"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLatLonInOneValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", "1.2,1.3")
+ .endObject()
+ .bytes());
+
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lat"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lon"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testGeoHashValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", GeoHashUtils.encode(1.2, 1.3))
+ .endObject()
+ .bytes());
+
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lat"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lon"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.encode(1.2, 1.3)));
+ MatcherAssert.assertThat(doc.rootDoc().get("point"), notNullValue());
+ }
+
+ @Test
+ public void testGeoHashPrecisionAsInteger() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("geohash_precision", 10).endObject().endObject()
+ .endObject().endObject().string();
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper mapper = defaultMapper.mappers().smartName("point").mapper();
+ assertThat(mapper, instanceOf(GeoPointFieldMapper.class));
+ GeoPointFieldMapper geoPointFieldMapper = (GeoPointFieldMapper) mapper;
+ assertThat(geoPointFieldMapper.geoHashPrecision(), is(10));
+ }
+
+ @Test
+ public void testGeoHashPrecisionAsLength() throws Exception {
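+ // a distance-style precision such as "5m" is converted to the equivalent
+ // geohash level, which is expected to be 10 here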
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("geohash_precision", "5m").endObject().endObject()
+ .endObject().endObject().string();
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+ FieldMapper mapper = defaultMapper.mappers().smartName("point").mapper();
+ assertThat(mapper, instanceOf(GeoPointFieldMapper.class));
+ GeoPointFieldMapper geoPointFieldMapper = (GeoPointFieldMapper) mapper;
+ assertThat(geoPointFieldMapper.geoHashPrecision(), is(10));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/LatLonAndGeohashMappingGeoPointTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/LatLonAndGeohashMappingGeoPointTests.java
new file mode 100644
index 0000000..64d7758
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/geo/LatLonAndGeohashMappingGeoPointTests.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class LatLonAndGeohashMappingGeoPointTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.encode(1.2, 1.3)));
+ }
+
+ @Test
+ public void testLatLonInOneValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", "1.2,1.3")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.encode(1.2, 1.3)));
+ }
+
+ @Test
+ public void testGeoHashValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", GeoHashUtils.encode(1.2, 1.3))
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.encode(1.2, 1.3)));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/LatLonMappingGeoPointTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/LatLonMappingGeoPointTests.java
new file mode 100644
index 0000000..f6b731a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/geo/LatLonMappingGeoPointTests.java
@@ -0,0 +1,411 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class LatLonMappingGeoPointTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNormalizeLatLonValuesDefault() throws Exception {
+ // default to normalize
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 91).field("lon", 181).endObject()
+ .endObject()
+ .bytes());
+
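+ // crossing the north pole reflects the latitude and shifts the longitude by 180 degrees: (91, 181) normalizes to (89.0, 1.0)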
+ assertThat(doc.rootDoc().get("point"), equalTo("89.0,1.0"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", -91).field("lon", -181).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("point"), equalTo("-89.0,-1.0"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 181).field("lon", 361).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("point"), equalTo("-1.0,-179.0"));
+ }
+
+ @Test
+ public void testValidateLatLonValues() throws Exception {
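+ // with normalize off and validate on, out-of-range coordinates must be rejected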
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("normalize", false).field("validate", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 90).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", -91).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+ fail("expected MapperParsingException for latitude below -90");
+ } catch (MapperParsingException e) {
+ // expected
+ }
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 91).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+ fail("expected MapperParsingException for latitude above 90");
+ } catch (MapperParsingException e) {
+ // expected
+ }
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", -181).endObject()
+ .endObject()
+ .bytes());
+ fail("expected MapperParsingException for longitude below -180");
+ } catch (MapperParsingException e) {
+ // expected
+ }
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 181).endObject()
+ .endObject()
+ .bytes());
+ fail("expected MapperParsingException for longitude above 180");
+ } catch (MapperParsingException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testNoValidateLatLonValues() throws Exception {
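+ // with normalize and validate both off, out-of-range coordinates are accepted as-is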
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("normalize", false).field("validate", false).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 90).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", -91).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 91).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", -181).endObject()
+ .endObject()
+ .bytes());
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 181).endObject()
+ .endObject()
+ .bytes());
+ }
+
+ @Test
+ public void testLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").fieldType().stored(), is(false));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").fieldType().stored(), is(false));
+ assertThat(doc.rootDoc().getField("point.geohash"), nullValue());
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLatLonValuesStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().getField("point.geohash"), nullValue());
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testArrayLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point")
+ .startObject().field("lat", 1.2).field("lon", 1.3).endObject()
+ .startObject().field("lat", 1.4).field("lon", 1.5).endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3"));
+ assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4));
+ assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5));
+ assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5"));
+ }
+
+ @Test
+ public void testLatLonInOneValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", "1.2,1.3")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLatLonInOneValueStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", "1.2,1.3")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLatLonInOneValueArray() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point")
+ .value("1.2,1.3")
+ .value("1.4,1.5")
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3"));
+ assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4));
+ assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5));
+ assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5"));
+ }
+
+ @Test
+ public void testGeoHashValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", GeoHashUtils.encode(1.2, 1.3))
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point"), notNullValue());
+ }
+
+ @Test
+ public void testLonLatArray() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
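+ // the array form uses GeoJSON coordinate order, [lon, lat], hence 1.3 before 1.2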
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point").value(1.3).value(1.2).endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLonLatArrayStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point").value(1.3).value(1.2).endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLonLatArrayArrayStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point")
+ .startArray().value(1.3).value(1.2).endArray()
+ .startArray().value(1.5).value(1.4).endArray()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3"));
+ assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4));
+ assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5));
+ assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5"));
+ }
+
+ @Test
+ public void testTryParsingLatLonFromString() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject().field("lat", "52").field("lon", "4").endObject();
+ XContentParser parser = XContentHelper.createParser(builder.bytes());
+ parser.nextToken();
+ GeoPoint geoPoint = GeoPoint.parse(parser);
+ assertThat(geoPoint.lat(), is(52.0));
+ assertThat(geoPoint.lon(), is(4.0));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java
new file mode 100644
index 0000000..adb97e6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.id;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.IdFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class IdMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleIdTests() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .endObject()
+ .bytes());
+
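+ // by default the id is indexed only as part of _uid, not as a separate _id field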
+ assertThat(doc.rootDoc().get(UidFieldMapper.NAME), notNullValue());
+ assertThat(doc.rootDoc().get(IdFieldMapper.NAME), nullValue());
+
+ try {
+ docMapper.parse("type", null, XContentFactory.jsonBuilder()
+ .startObject()
+ .endObject()
+ .bytes());
+ fail("expected MapperParsingException when no id is provided");
+ } catch (MapperParsingException e) {
+ // expected: the document carries no id
+ }
+
+ doc = docMapper.parse("type", null, XContentFactory.jsonBuilder()
+ .startObject()
+ .field("_id", 1)
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get(UidFieldMapper.NAME), notNullValue());
+ assertThat(doc.rootDoc().get(IdFieldMapper.NAME), nullValue());
+ }
+
+ @Test
+ public void testIdIndexed() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_id").field("index", "not_analyzed").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .endObject()
+ .bytes());
+
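+        // with "index": "not_analyzed" on _id, the field is indexed and present in the doc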
+ assertThat(doc.rootDoc().get(UidFieldMapper.NAME), notNullValue());
+ assertThat(doc.rootDoc().get(IdFieldMapper.NAME), notNullValue());
+
+ doc = docMapper.parse("type", null, XContentFactory.jsonBuilder()
+ .startObject()
+ .field("_id", 1)
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get(UidFieldMapper.NAME), notNullValue());
+ assertThat(doc.rootDoc().get(IdFieldMapper.NAME), notNullValue());
+ }
+
+ @Test
+ public void testIdPath() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_id").field("path", "my_path").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ // serialize the id mapping
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ builder = docMapper.idFieldMapper().toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+        String serializedIdMapping = builder.string();
+
+        String expectedIdMapping = XContentFactory.jsonBuilder().startObject()
+                .startObject("_id").field("path", "my_path").endObject()
+                .endObject().string();
+
+        assertThat(serializedIdMapping, equalTo(expectedIdMapping));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java
new file mode 100644
index 0000000..ce08da5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.index;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for the {@code _index} field mapper: enabling, disabling and merge behaviour.
+ */
+public class IndexTypeMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleIndexMapperTests() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", true).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ IndexFieldMapper indexMapper = docMapper.rootMapper(IndexFieldMapper.class);
+ assertThat(indexMapper.enabled(), equalTo(true));
+ assertThat(indexMapper.fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().indexName("_index").mapper(), instanceOf(IndexFieldMapper.class));
+
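+        // the _index field should record the index name, which is "test" for the test parser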
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("_index"), equalTo("test"));
+ assertThat(doc.rootDoc().get("field"), equalTo("value"));
+ }
+
+ @Test
+ public void explicitDisabledIndexMapperTests() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", false).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ IndexFieldMapper indexMapper = docMapper.rootMapper(IndexFieldMapper.class);
+ assertThat(indexMapper.enabled(), equalTo(false));
+ assertThat(indexMapper.fieldType().stored(), equalTo(true));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("_index"), nullValue());
+ assertThat(doc.rootDoc().get("field"), equalTo("value"));
+ }
+
+ @Test
+ public void defaultDisabledIndexMapperTests() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ IndexFieldMapper indexMapper = docMapper.rootMapper(IndexFieldMapper.class);
+ assertThat(indexMapper.enabled(), equalTo(false));
+ assertThat(indexMapper.fieldType().stored(), equalTo(false));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("_index"), nullValue());
+ assertThat(doc.rootDoc().get("field"), equalTo("value"));
+ }
+
+ @Test
+ public void testThatMergingFieldMappingAllowsDisabling() throws Exception {
+ String mappingWithIndexEnabled = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", true).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper mapperEnabled = MapperTestUtils.newParser().parse(mappingWithIndexEnabled);
+
+ String mappingWithIndexDisabled = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", false).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper mapperDisabled = MapperTestUtils.newParser().parse(mappingWithIndexDisabled);
+
+ mapperEnabled.merge(mapperDisabled, DocumentMapper.MergeFlags.mergeFlags().simulate(false));
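+        // after a real (non-simulated) merge, the _index field must end up disabled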
+        assertThat(mapperEnabled.rootMapper(IndexFieldMapper.class).enabled(), is(false));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java
new file mode 100644
index 0000000..88cd8d6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.ip;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Ignore;
+
+/**
+ * Placeholder for ip field mapping tests; the original auto-detection test is kept below,
+ * commented out, for reference.
+ */
+@Ignore("No tests?")
+public class SimpleIpMappingTests extends ElasticsearchTestCase {
+
+    // No longer enabled...
+// @Test public void testAutoIpDetection() throws Exception {
+// String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+// .startObject("properties").endObject()
+// .endObject().endObject().string();
+//
+// XContentDocumentMapper defaultMapper = MapperTests.newParser().parse(mapping);
+//
+// ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+// .startObject()
+// .field("ip1", "127.0.0.1")
+// .field("ip2", "0.1")
+// .field("ip3", "127.0.0.1.2")
+// .endObject()
+// .copiedBytes());
+//
+// assertThat(doc.doc().getFieldable("ip1"), notNullValue());
+// assertThat(doc.doc().get("ip1"), nullValue()); // its numeric
+// assertThat(doc.doc().get("ip2"), equalTo("0.1"));
+// assertThat(doc.doc().get("ip3"), equalTo("127.0.0.1.2"));
+// }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTest.java b/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTest.java
new file mode 100644
index 0000000..b2f90aa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTest.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.lucene;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Checks that indexing the same parsed document twice yields two hits for every mapped field.
+ */
+public class DoubleIndexingDocTest {
+
+ @Test
+ public void testDoubleIndexingSameDoc() throws Exception {
+ IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").endObject()
+ .endObject().endObject().string();
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", 1)
+ .field("field3", 1.1)
+ .field("field4", "2010-01-01")
+ .startArray("field5").value(1).value(2).value(3).endArray()
+ .endObject()
+ .bytes());
+
+ writer.addDocument(doc.rootDoc(), doc.analyzer());
+ writer.addDocument(doc.rootDoc(), doc.analyzer());
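+        // the same parsed document was added twice, so every term query below must hit both copies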
+
+ IndexReader reader = DirectoryReader.open(writer, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ TopDocs topDocs = searcher.search(mapper.mappers().smartName("field1").mapper().termQuery("value1", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartName("field2").mapper().termQuery("1", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartName("field3").mapper().termQuery("1.1", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartName("field4").mapper().termQuery("2010-01-01", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartName("field5").mapper().termQuery("1", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartName("field5").mapper().termQuery("2", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartName("field5").mapper().termQuery("3", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTest.java b/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTest.java
new file mode 100644
index 0000000..4283e23
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTest.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.lucene;
+
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Checks that stored numeric fields are loaded with their original types, whether stored as
+ * typed fields or as raw byte representations.
+ */
+public class StoredNumericValuesTest {
+
+ @Test
+ public void testBytesAndNumericRepresentation() throws Exception {
+ IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
+
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "integer").field("store", "yes").endObject()
+ .startObject("field2").field("type", "float").field("store", "yes").endObject()
+ .startObject("field3").field("type", "long").field("store", "yes").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", 1)
+ .field("field2", 1.1)
+ .startArray("field3").value(1).value(2).value(3).endArray()
+ .endObject()
+ .bytes());
+
+ writer.addDocument(doc.rootDoc(), doc.analyzer());
+
+ // Indexing a doc in the old way
+ FieldType fieldType = new FieldType();
+ fieldType.setStored(true);
+ fieldType.setNumericType(FieldType.NumericType.INT);
+ Document doc2 = new Document();
+ doc2.add(new StoredField("field1", new BytesRef(Numbers.intToBytes(1))));
+ doc2.add(new StoredField("field2", new BytesRef(Numbers.floatToBytes(1.1f))));
+ doc2.add(new StoredField("field3", new BytesRef(Numbers.longToBytes(1l))));
+ doc2.add(new StoredField("field3", new BytesRef(Numbers.longToBytes(2l))));
+ doc2.add(new StoredField("field3", new BytesRef(Numbers.longToBytes(3l))));
+ writer.addDocument(doc2);
+
+ DirectoryReader reader = DirectoryReader.open(writer, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
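+        // load the stored fields of the mapper-indexed doc; values must come back with their numeric types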
+ Set<String> fields = new HashSet<String>(Arrays.asList("field1", "field2", "field3"));
+ CustomFieldsVisitor fieldsVisitor = new CustomFieldsVisitor(fields, false);
+ searcher.doc(0, fieldsVisitor);
+ fieldsVisitor.postProcess(mapper);
+ assertThat(fieldsVisitor.fields().size(), equalTo(3));
+ assertThat(fieldsVisitor.fields().get("field1").size(), equalTo(1));
+ assertThat((Integer) fieldsVisitor.fields().get("field1").get(0), equalTo(1));
+ assertThat(fieldsVisitor.fields().get("field2").size(), equalTo(1));
+ assertThat((Float) fieldsVisitor.fields().get("field2").get(0), equalTo(1.1f));
+ assertThat(fieldsVisitor.fields().get("field3").size(), equalTo(3));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(0), equalTo(1l));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(1), equalTo(2l));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(2), equalTo(3l));
+
+        // Make sure the doc stored in the old way is loaded as if it had been stored in the new way
+ fieldsVisitor.reset();
+ searcher.doc(1, fieldsVisitor);
+ fieldsVisitor.postProcess(mapper);
+ assertThat(fieldsVisitor.fields().size(), equalTo(3));
+ assertThat(fieldsVisitor.fields().get("field1").size(), equalTo(1));
+ assertThat((Integer) fieldsVisitor.fields().get("field1").get(0), equalTo(1));
+ assertThat(fieldsVisitor.fields().get("field2").size(), equalTo(1));
+ assertThat((Float) fieldsVisitor.fields().get("field2").get(0), equalTo(1.1f));
+ assertThat(fieldsVisitor.fields().get("field3").size(), equalTo(3));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(0), equalTo(1l));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(1), equalTo(2l));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(2), equalTo(3l));
+
+ reader.close();
+ writer.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java
new file mode 100644
index 0000000..df695b5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.merge;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for mapping merges: simulated vs. real merges, dynamic settings, nested conflicts and analyzers.
+ */
+public class TestMergeMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void test1Merge() throws Exception {
+
+ String stage1Mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .endObject().endObject().endObject().string();
+ DocumentMapper stage1 = MapperTestUtils.newParser().parse(stage1Mapping);
+ String stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .startObject("age").field("type", "integer").endObject()
+ .startObject("obj1").startObject("properties").startObject("prop1").field("type", "integer").endObject().endObject().endObject()
+ .endObject().endObject().endObject().string();
+ DocumentMapper stage2 = MapperTestUtils.newParser().parse(stage2Mapping);
+
+ DocumentMapper.MergeResult mergeResult = stage1.merge(stage2, mergeFlags().simulate(true));
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ // since we are simulating, we should not have the age mapping
+ assertThat(stage1.mappers().smartName("age"), nullValue());
+ assertThat(stage1.mappers().smartName("obj1.prop1"), nullValue());
+        // now merge for real, without simulating
+        mergeResult = stage1.merge(stage2, mergeFlags().simulate(false));
+        // there should still be no merge conflicts
+        assertThat(mergeResult.hasConflicts(), equalTo(false));
+        // and the age mapping is now present
+ assertThat(stage1.mappers().smartName("age"), notNullValue());
+ assertThat(stage1.mappers().smartName("obj1.prop1"), notNullValue());
+ }
+
+ @Test
+ public void testMergeObjectDynamic() throws Exception {
+ String objectMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").endObject().endObject().string();
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(objectMapping);
+ assertThat(mapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.TRUE));
+
+ String withDynamicMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").field("dynamic", "false").endObject().endObject().string();
+ DocumentMapper withDynamicMapper = MapperTestUtils.newParser().parse(withDynamicMapping);
+ assertThat(withDynamicMapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE));
+
+ DocumentMapper.MergeResult mergeResult = mapper.merge(withDynamicMapper, mergeFlags().simulate(false));
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(mapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE));
+ }
+
+ @Test
+ public void testMergeObjectAndNested() throws Exception {
+ String objectMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("obj").field("type", "object").endObject()
+ .endObject().endObject().endObject().string();
+ DocumentMapper objectMapper = MapperTestUtils.newParser().parse(objectMapping);
+ String nestedMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("obj").field("type", "nested").endObject()
+ .endObject().endObject().endObject().string();
+ DocumentMapper nestedMapper = MapperTestUtils.newParser().parse(nestedMapping);
+
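+        // object -> nested is rejected, and so is the reverse direction below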
+ DocumentMapper.MergeResult mergeResult = objectMapper.merge(nestedMapper, mergeFlags().simulate(true));
+ assertThat(mergeResult.hasConflicts(), equalTo(true));
+ assertThat(mergeResult.conflicts().length, equalTo(1));
+ assertThat(mergeResult.conflicts()[0], equalTo("object mapping [obj] can't be changed from non-nested to nested"));
+
+ mergeResult = nestedMapper.merge(objectMapper, mergeFlags().simulate(true));
+ assertThat(mergeResult.conflicts().length, equalTo(1));
+ assertThat(mergeResult.conflicts()[0], equalTo("object mapping [obj] can't be changed from nested to non-nested"));
+ }
+
+ @Test
+ public void testMergeSearchAnalyzer() throws Exception {
+ String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("search_analyzer", "whitespace").endObject().endObject()
+ .endObject().endObject().string();
+ String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("search_analyzer", "keyword").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper existing = MapperTestUtils.newParser().parse(mapping1);
+ DocumentMapper changed = MapperTestUtils.newParser().parse(mapping2);
+
+ assertThat(((NamedAnalyzer) existing.mappers().name("field").mapper().searchAnalyzer()).name(), equalTo("whitespace"));
+ DocumentMapper.MergeResult mergeResult = existing.merge(changed, mergeFlags().simulate(false));
+
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(((NamedAnalyzer) existing.mappers().name("field").mapper().searchAnalyzer()).name(), equalTo("keyword"));
+ }
+
+ @Test
+ public void testNotChangeSearchAnalyzer() throws Exception {
+ String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("search_analyzer", "whitespace").endObject().endObject()
+ .endObject().endObject().string();
+ String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("postings_format", "direct").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper existing = MapperTestUtils.newParser().parse(mapping1);
+ DocumentMapper changed = MapperTestUtils.newParser().parse(mapping2);
+
+ assertThat(((NamedAnalyzer) existing.mappers().name("field").mapper().searchAnalyzer()).name(), equalTo("whitespace"));
+ DocumentMapper.MergeResult mergeResult = existing.merge(changed, mergeFlags().simulate(false));
+
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(((NamedAnalyzer) existing.mappers().name("field").mapper().searchAnalyzer()).name(), equalTo("whitespace"));
+ assertThat((existing.mappers().name("field").mapper().postingsFormatProvider()).name(), equalTo("direct"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java b/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java
new file mode 100644
index 0000000..0be1024
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java
@@ -0,0 +1,408 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.multifield;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.core.*;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.mapper.MapperBuilders.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for multi field mappings, covering both the multi fields syntax and the legacy multi_field type.
+ */
+public class MultiFieldTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testMultiField_multiFieldType() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json");
+ testMultiField(mapping);
+ }
+
+ @Test
+ public void testMultiField_multiFields() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-fields.json");
+ testMultiField(mapping);
+ }
+
+ private void testMultiField(String mapping) throws Exception {
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/test-data.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("name.indexed");
+ assertThat(f.name(), equalTo("name.indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("name.not_indexed");
+ assertThat(f.name(), equalTo("name.not_indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertThat(f.fieldType().indexed(), equalTo(false));
+
+ f = doc.getField("object1.multi1");
+ assertThat(f.name(), equalTo("object1.multi1"));
+
+ f = doc.getField("object1.multi1.string");
+ assertThat(f.name(), equalTo("object1.multi1.string"));
+ assertThat(f.stringValue(), equalTo("2010-01-01"));
+
+ assertThat(docMapper.mappers().fullName("name").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper().fieldType().indexed(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name.test1").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.test1").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name.test1").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.test1").mapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.test1").mapper().fieldType().tokenized(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.test1").mapper().fieldDataType().getLoading(), equalTo(FieldMapper.Loading.EAGER));
+
+ assertThat(docMapper.mappers().fullName("name.test2").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.test2").mapper(), instanceOf(TokenCountFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name.test2").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.test2").mapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.test2").mapper().fieldType().tokenized(), equalTo(false));
+ assertThat(((TokenCountFieldMapper) docMapper.mappers().fullName("name.test2").mapper()).analyzer(), equalTo("simple"));
+
+ assertThat(docMapper.mappers().fullName("object1.multi1").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("object1.multi1").mapper(), instanceOf(DateFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("object1.multi1.string").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("object1.multi1.string").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("object1.multi1.string").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("object1.multi1.string").mapper().fieldType().tokenized(), equalTo(false));
+ }
+
+ @Test
+ public void testBuildThenParse() throws Exception {
+ DocumentMapperParser mapperParser = MapperTestUtils.newParser();
+
+ DocumentMapper builderDocMapper = doc("test", rootObject("person").add(
+ stringField("name").store(true)
+ .addMultiField(stringField("indexed").index(true).tokenized(true))
+ .addMultiField(stringField("not_indexed").index(false).store(true))
+ )).build(mapperParser);
+ builderDocMapper.refreshSource();
+
+ String builtMapping = builderDocMapper.mappingSource().string();
+ // reparse it
+ DocumentMapper docMapper = mapperParser.parse(builtMapping);
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/test-data.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("name.indexed");
+ assertThat(f.name(), equalTo("name.indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("name.not_indexed");
+ assertThat(f.name(), equalTo("name.not_indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertThat(f.fieldType().indexed(), equalTo(false));
+ }
+
+ @Test
+ public void testConvertMultiFieldNoDefaultField() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/test-data.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+
+ assertNull(doc.getField("name"));
+ IndexableField f = doc.getField("name.indexed");
+ assertThat(f.name(), equalTo("name.indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("name.not_indexed");
+ assertThat(f.name(), equalTo("name.not_indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertThat(f.fieldType().indexed(), equalTo(false));
+
+ assertThat(docMapper.mappers().fullName("name").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper().fieldType().indexed(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertNull(doc.getField("age"));
+ f = doc.getField("age.not_stored");
+ assertThat(f.name(), equalTo("age.not_stored"));
+ assertThat(f.numericValue(), equalTo((Number) 28L));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("age.stored");
+ assertThat(f.name(), equalTo("age.stored"));
+ assertThat(f.numericValue(), equalTo((Number) 28L));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("age").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("age").mapper(), instanceOf(LongFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("age").mapper().fieldType().indexed(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("age").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("age").mapper().fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().fullName("age.not_stored").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("age.not_stored").mapper(), instanceOf(LongFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("age.not_stored").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("age.not_stored").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("age.not_stored").mapper().fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().fullName("age.stored").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("age.stored").mapper(), instanceOf(LongFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("age.stored").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("age.stored").mapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("age.stored").mapper().fieldType().tokenized(), equalTo(false));
+ }
+
+ @Test
+ public void testConvertMultiFieldGeoPoint() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.mappers().fullName("a").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("a").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("a").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("a").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("a").mapper().fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().fullName("a.b").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("a.b").mapper(), instanceOf(GeoPointFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("a.b").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("a.b").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("a.b").mapper().fieldType().tokenized(), equalTo(false));
+
+ BytesReference json = jsonBuilder().startObject()
+ .field("_id", "1")
+ .field("a", "-1,-1")
+ .endObject().bytes();
+ Document doc = docMapper.parse(json).rootDoc();
+
+ IndexableField f = doc.getField("a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("a"));
+ assertThat(f.stringValue(), equalTo("-1,-1"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("a.b");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("a.b"));
+ assertThat(f.stringValue(), equalTo("-1.0,-1.0"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("b").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("b").mapper(), instanceOf(GeoPointFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("b").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("b").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("b").mapper().fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().fullName("b.a").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("b.a").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("b.a").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("b.a").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("b.a").mapper().fieldType().tokenized(), equalTo(false));
+
+ json = jsonBuilder().startObject()
+ .field("_id", "1")
+ .field("b", "-1,-1")
+ .endObject().bytes();
+ doc = docMapper.parse(json).rootDoc();
+
+ f = doc.getField("b");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b"));
+ assertThat(f.stringValue(), equalTo("-1.0,-1.0"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("b.a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b.a"));
+ assertThat(f.stringValue(), equalTo("-1,-1"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ json = jsonBuilder().startObject()
+ .field("_id", "1")
+ .startArray("b").startArray().value(-1).value(-1).endArray().startArray().value(-2).value(-2).endArray().endArray()
+ .endObject().bytes();
+ doc = docMapper.parse(json).rootDoc();
+
+ f = doc.getFields("b")[0];
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b"));
+ assertThat(f.stringValue(), equalTo("-1.0,-1.0"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getFields("b")[1];
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b"));
+ assertThat(f.stringValue(), equalTo("-2.0,-2.0"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("b.a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b.a"));
+        // NOTE: because the lat,lon values are not given as a string, the multi field parser has
+        // already consumed the coordinate values and only sees the closing token, hence "]".
+        // This happens when coordinates are specified as an array or an object.
+ assertThat(f.stringValue(), equalTo("]"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ }
+
+ @Test
+ public void testConvertMultiFieldCompletion() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.mappers().fullName("a").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("a").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("a").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("a").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("a").mapper().fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().fullName("a.b").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("a.b").mapper(), instanceOf(CompletionFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("a.b").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("a.b").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("a.b").mapper().fieldType().tokenized(), equalTo(true));
+
+ BytesReference json = jsonBuilder().startObject()
+ .field("_id", "1")
+ .field("a", "complete me")
+ .endObject().bytes();
+ Document doc = docMapper.parse(json).rootDoc();
+
+ IndexableField f = doc.getField("a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("a"));
+ assertThat(f.stringValue(), equalTo("complete me"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("a.b");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("a.b"));
+ assertThat(f.stringValue(), equalTo("complete me"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("b").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("b").mapper(), instanceOf(CompletionFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("b").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("b").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("b").mapper().fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("b.a").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("b.a").mapper(), instanceOf(StringFieldMapper.class));
+ assertThat(docMapper.mappers().fullName("b.a").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("b.a").mapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().fullName("b.a").mapper().fieldType().tokenized(), equalTo(false));
+
+ json = jsonBuilder().startObject()
+ .field("_id", "1")
+ .field("b", "complete me")
+ .endObject().bytes();
+ doc = docMapper.parse(json).rootDoc();
+
+ f = doc.getField("b");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b"));
+ assertThat(f.stringValue(), equalTo("complete me"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+
+ f = doc.getField("b.a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b.a"));
+ assertThat(f.stringValue(), equalTo("complete me"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertThat(f.fieldType().indexed(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationTests.java b/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationTests.java
new file mode 100644
index 0000000..1808074
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationTests.java
@@ -0,0 +1,309 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.multifield;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.index.query.FilterBuilders.geoDistanceFilter;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for multi fields across field types (string, geo_point, token_count, completion, ip).
+ */
+public class MultiFieldsIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testMultiFields() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", createTypeSource())
+ );
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map titleFields = ((Map) XContentMapValues.extractValue("properties.title.fields", mappingSource));
+ assertThat(titleFields.size(), equalTo(1));
+ assertThat(titleFields.get("not_analyzed"), notNullValue());
+ assertThat(((Map)titleFields.get("not_analyzed")).get("index").toString(), equalTo("not_analyzed"));
+
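+        // the analyzed root field matches per-term; the not_analyzed sub field only the exact value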
+ client().prepareIndex("my-index", "my-type", "1")
+ .setSource("title", "Multi fields")
+ .setRefresh(true)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch("my-index")
+ .setQuery(matchQuery("title", "multi"))
+ .get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch("my-index")
+ .setQuery(matchQuery("title.not_analyzed", "Multi fields"))
+ .get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ assertAcked(
+ client().admin().indices().preparePutMapping("my-index").setType("my-type")
+ .setSource(createPutMappingSource())
+                        .setIgnoreConflicts(true) // if the update uses the legacy multi_field type, the resulting conflicts must be ignored
+ );
+
+ getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ mappingSource = mappingMetaData.sourceAsMap();
+ assertThat(((Map) XContentMapValues.extractValue("properties.title", mappingSource)).size(), equalTo(2));
+ titleFields = ((Map) XContentMapValues.extractValue("properties.title.fields", mappingSource));
+ assertThat(titleFields.size(), equalTo(2));
+ assertThat(titleFields.get("not_analyzed"), notNullValue());
+ assertThat(((Map)titleFields.get("not_analyzed")).get("index").toString(), equalTo("not_analyzed"));
+ assertThat(titleFields.get("uncased"), notNullValue());
+ assertThat(((Map)titleFields.get("uncased")).get("analyzer").toString(), equalTo("whitespace"));
+
+ client().prepareIndex("my-index", "my-type", "1")
+ .setSource("title", "Multi fields")
+ .setRefresh(true)
+ .get();
+
+ searchResponse = client().prepareSearch("my-index")
+ .setQuery(matchQuery("title.uncased", "Multi"))
+ .get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ @Test
+ public void testGeoPointMultiField() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", createMappingSource("geo_point"))
+ );
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
+ assertThat(aField.size(), equalTo(2));
+ assertThat(aField.get("type").toString(), equalTo("geo_point"));
+ assertThat(aField.get("fields"), notNullValue());
+
+ Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource));
+ assertThat(bField.size(), equalTo(2));
+ assertThat(bField.get("type").toString(), equalTo("string"));
+ assertThat(bField.get("index").toString(), equalTo("not_analyzed"));
+
+ client().prepareIndex("my-index", "my-type", "1").setSource("a", "51,19").setRefresh(true).get();
+ CountResponse countResponse = client().prepareCount("my-index")
+ .setQuery(constantScoreQuery(geoDistanceFilter("a").point(51, 19).distance(50, DistanceUnit.KILOMETERS)))
+ .get();
+ assertThat(countResponse.getCount(), equalTo(1l));
+ countResponse = client().prepareCount("my-index").setQuery(matchQuery("a.b", "51,19")).get();
+ assertThat(countResponse.getCount(), equalTo(1l));
+ }
+
+ @Test
+ public void testTokenCountMultiField() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("a")
+ .field("type", "token_count")
+ .field("analyzer", "simple")
+ .startObject("fields")
+ .startObject("b")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject())
+ );
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
+ assertThat(aField.size(), equalTo(3));
+ assertThat(aField.get("type").toString(), equalTo("token_count"));
+ assertThat(aField.get("fields"), notNullValue());
+
+ Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource));
+ assertThat(bField.size(), equalTo(2));
+ assertThat(bField.get("type").toString(), equalTo("string"));
+ assertThat(bField.get("index").toString(), equalTo("not_analyzed"));
+
+ client().prepareIndex("my-index", "my-type", "1").setSource("a", "my tokens").setRefresh(true).get();
+ CountResponse countResponse = client().prepareCount("my-index").setQuery(matchQuery("a.b", "my tokens")).get();
+ assertThat(countResponse.getCount(), equalTo(1l));
+ }
+
+ @Test
+ public void testCompletionMultiField() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", createMappingSource("completion"))
+ );
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
+ assertThat(aField.size(), equalTo(7));
+ assertThat(aField.get("type").toString(), equalTo("completion"));
+ assertThat(aField.get("fields"), notNullValue());
+
+ Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource));
+ assertThat(bField.size(), equalTo(2));
+ assertThat(bField.get("type").toString(), equalTo("string"));
+ assertThat(bField.get("index").toString(), equalTo("not_analyzed"));
+
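+ // The completion field powers suggestions; the string sub-field keeps the raw input available for exact matching.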
+ client().prepareIndex("my-index", "my-type", "1").setSource("a", "complete me").setRefresh(true).get();
+ CountResponse countResponse = client().prepareCount("my-index").setQuery(matchQuery("a.b", "complete me")).get();
+ assertThat(countResponse.getCount(), equalTo(1L));
+ }
+
+ @Test
+ public void testIpMultiField() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", createMappingSource("ip"))
+ );
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
+ assertThat(aField.size(), equalTo(2));
+ assertThat(aField.get("type").toString(), equalTo("ip"));
+ assertThat(aField.get("fields"), notNullValue());
+
+ Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource));
+ assertThat(bField.size(), equalTo(2));
+ assertThat(bField.get("type").toString(), equalTo("string"));
+ assertThat(bField.get("index").toString(), equalTo("not_analyzed"));
+
+ client().prepareIndex("my-index", "my-type", "1").setSource("a", "127.0.0.1").setRefresh(true).get();
+ CountResponse countResponse = client().prepareCount("my-index").setQuery(matchQuery("a.b", "127.0.0.1")).get();
+ assertThat(countResponse.getCount(), equalTo(1L));
+ }
+
+ private XContentBuilder createMappingSource(String fieldType) throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("a")
+ .field("type", fieldType)
+ .startObject("fields")
+ .startObject("b")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
+ private XContentBuilder createTypeSource() throws IOException {
+ if (randomBoolean()) {
+ return XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("title")
+ .field("type", "string")
+ .startObject("fields")
+ .startObject("not_analyzed")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ } else {
+ return XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("title")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("title")
+ .field("type", "string")
+ .endObject()
+ .startObject("not_analyzed")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+ }
+
+ private XContentBuilder createPutMappingSource() throws IOException {
+ if (randomBoolean()) {
+ return XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("title")
+ .field("type", "string")
+ .startObject("fields")
+ .startObject("uncased")
+ .field("type", "string")
+ .field("analyzer", "whitespace")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ } else {
+ return XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("title")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("uncased")
+ .field("type", "string")
+ .field("analyzer", "whitespace")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java
new file mode 100644
index 0000000..d866363
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.multifield.merge;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class JavaMultiFieldMergeTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testMergeMultiField() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json");
+ DocumentMapperParser parser = MapperTestUtils.newParser();
+
+ DocumentMapper docMapper = parser.parse(mapping);
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed"), nullValue());
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-data.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+ IndexableField f = doc.getField("name");
+ assertThat(f, notNullValue());
+ f = doc.getField("name.indexed");
+ assertThat(f, nullValue());
+
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json");
+ DocumentMapper docMapper2 = parser.parse(mapping);
+
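+ // simulate(true) is a dry run that only reports conflicts; the subsequent merge with simulate(false) actually applies the new sub-fields.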
+ DocumentMapper.MergeResult mergeResult = docMapper.merge(docMapper2, mergeFlags().simulate(true));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper2, mergeFlags().simulate(false));
+
+ assertThat(docMapper.mappers().name("name").mapper().fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed2"), nullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed3"), nullValue());
+
+ json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-data.json"));
+ doc = docMapper.parse(json).rootDoc();
+ f = doc.getField("name");
+ assertThat(f, notNullValue());
+ f = doc.getField("name.indexed");
+ assertThat(f, notNullValue());
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json");
+ DocumentMapper docMapper3 = parser.parse(mapping);
+
+ mergeResult = docMapper.merge(docMapper3, mergeFlags().simulate(true));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper3, mergeFlags().simulate(false));
+
+ assertThat(docMapper.mappers().name("name").mapper().fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed2").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed3"), nullValue());
+
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json");
+ DocumentMapper docMapper4 = parser.parse(mapping);
+
+
+ mergeResult = docMapper.merge(docMapper4, mergeFlags().simulate(true));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper4, mergeFlags().simulate(false));
+
+ assertThat(docMapper.mappers().name("name").mapper().fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed2").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed3").mapper(), notNullValue());
+ }
+
+ @Test
+ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json");
+ DocumentMapperParser parser = MapperTestUtils.newParser();
+
+ DocumentMapper docMapper = parser.parse(mapping);
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed"), nullValue());
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-data.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+ IndexableField f = doc.getField("name");
+ assertThat(f, notNullValue());
+ f = doc.getField("name.indexed");
+ assertThat(f, nullValue());
+
+
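+ // upgrade1.json redefines "name" with the legacy multi_field type; merging should upgrade it to the multi fields syntax without conflicts.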
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json");
+ DocumentMapper docMapper2 = parser.parse(mapping);
+
+ DocumentMapper.MergeResult mergeResult = docMapper.merge(docMapper2, mergeFlags().simulate(true));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper2, mergeFlags().simulate(false));
+
+ assertThat(docMapper.mappers().name("name").mapper().fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed2"), nullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed3"), nullValue());
+
+ json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-data.json"));
+ doc = docMapper.parse(json).rootDoc();
+ f = doc.getField("name");
+ assertThat(f, notNullValue());
+ f = doc.getField("name.indexed");
+ assertThat(f, notNullValue());
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json");
+ DocumentMapper docMapper3 = parser.parse(mapping);
+
+ mergeResult = docMapper.merge(docMapper3, mergeFlags().simulate(true));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper3, mergeFlags().simulate(false));
+
+ assertThat(docMapper.mappers().name("name").mapper().fieldType().indexed(), equalTo(true));
+
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed2").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed3"), nullValue());
+
+
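+ // Merging upgrade3.json triggers conflicts because its implicit defaults for "name" disagree with the existing index/store settings; compatible sub-fields are still added.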
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json");
+ DocumentMapper docMapper4 = parser.parse(mapping);
+ mergeResult = docMapper.merge(docMapper4, mergeFlags().simulate(true));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(true));
+ assertThat(mergeResult.conflicts()[0], equalTo("mapper [name] has different index values"));
+ assertThat(mergeResult.conflicts()[1], equalTo("mapper [name] has different store values"));
+
+ mergeResult = docMapper.merge(docMapper4, mergeFlags().simulate(false));
+ assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(true));
+
+ assertThat(docMapper.mappers().name("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(mergeResult.conflicts()[0], equalTo("mapper [name] has different index values"));
+ assertThat(mergeResult.conflicts()[1], equalTo("mapper [name] has different store values"));
+
+ // There are conflicts, but `name.not_indexed3` has still been added, because that field itself has no conflicts
+ assertThat(docMapper.mappers().fullName("name").mapper().fieldType().indexed(), equalTo(true));
+ assertThat(docMapper.mappers().fullName("name.indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed2").mapper(), notNullValue());
+ assertThat(docMapper.mappers().fullName("name.not_indexed3").mapper(), notNullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-data.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-data.json
new file mode 100644
index 0000000..c539fcc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-data.json
@@ -0,0 +1,4 @@
+{
+ _id:1,
+ name:"some name"
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json
new file mode 100644
index 0000000..61f08af
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json
@@ -0,0 +1,11 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"string",
+ index:"analyzed",
+ store:"yes"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json
new file mode 100644
index 0000000..02ce895
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json
@@ -0,0 +1,27 @@
+{
+ "person" :{
+ "properties" :{
+ "name":{
+ "type" :"string",
+ "index" :"analyzed",
+ "store" :"yes",
+ "fields":{
+ "name":{
+ "type" :"string",
+ "index" :"analyzed",
+ "store" :"yes"
+ },
+ "indexed":{
+ "type" :"string",
+ "index" :"analyzed"
+ },
+ "not_indexed":{
+ "type" :"string",
+ "index" :"no",
+ "store" :"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json
new file mode 100644
index 0000000..ea07675
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json
@@ -0,0 +1,32 @@
+{
+ "person" : {
+ "properties" :{
+ "name" : {
+ "type" : "string",
+ "index" : "analyzed",
+ "store" : "yes",
+ "fields": {
+ "name" : {
+ "type" : "string",
+ "index" : "analyzed",
+ "store" : "yes"
+ },
+ "indexed":{
+ type:"string",
+ index:"analyzed"
+ },
+ "not_indexed":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ },
+ "not_indexed2":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json
new file mode 100644
index 0000000..384c263
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json
@@ -0,0 +1,18 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"string",
+ index:"analyzed",
+ store:"yes",
+ "fields":{
+ "not_indexed3":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json
new file mode 100644
index 0000000..6206592
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json
@@ -0,0 +1,25 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"multi_field",
+ "fields":{
+ "name":{
+ type:"string",
+ index:"analyzed",
+ store:"yes"
+ },
+ "indexed":{
+ type:"string",
+ index:"analyzed"
+ },
+ "not_indexed":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json
new file mode 100644
index 0000000..4a8fbf6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json
@@ -0,0 +1,30 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"multi_field",
+ "fields":{
+ "name":{
+ type:"string",
+ index:"analyzed",
+ store:"yes"
+ },
+ "indexed":{
+ type:"string",
+ index:"analyzed"
+ },
+ "not_indexed":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ },
+ "not_indexed2":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json
new file mode 100644
index 0000000..9b30978
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json
@@ -0,0 +1,16 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"multi_field",
+ "fields":{
+ "not_indexed3":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/test-data.json b/src/test/java/org/elasticsearch/index/mapper/multifield/test-data.json
new file mode 100644
index 0000000..35f9418
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/test-data.json
@@ -0,0 +1,8 @@
+{
+ "_id":1,
+ "age":28,
+ "name":"some name",
+ "object1":{
+ "multi1":"2010-01-01"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json
new file mode 100644
index 0000000..d36e9d2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json
@@ -0,0 +1,30 @@
+{
+ "type":{
+ "properties":{
+ "a":{
+ "type":"multi_field",
+ "fields":{
+ "a":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "b":{
+ "type":"completion"
+ }
+ }
+ },
+ "b":{
+ "type":"multi_field",
+ "fields":{
+ "a":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "b":{
+ "type":"completion"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json
new file mode 100644
index 0000000..c7d11be
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json
@@ -0,0 +1,30 @@
+{
+ "type":{
+ "properties":{
+ "a":{
+ "type":"multi_field",
+ "fields":{
+ "a":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "b":{
+ "type":"geo_point"
+ }
+ }
+ },
+ "b":{
+ "type":"multi_field",
+ "fields":{
+ "a":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "b":{
+ "type":"geo_point"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json
new file mode 100644
index 0000000..99b74c0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json
@@ -0,0 +1,32 @@
+{
+ "person": {
+ "properties": {
+ "name": {
+ "type": "multi_field",
+ "fields": {
+ "indexed": {
+ "type": "string",
+ "index": "analyzed"
+ },
+ "not_indexed": {
+ "type": "string",
+ "index": "no",
+ "store": "yes"
+ }
+ }
+ },
+ "age": {
+ "type": "multi_field",
+ "fields": {
+ "not_stored": {
+ "type": "long"
+ },
+ "stored": {
+ "type": "long",
+ "store": "yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json
new file mode 100644
index 0000000..b099b9a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json
@@ -0,0 +1,55 @@
+{
+ "person":{
+ "properties":{
+ "name":{
+ "type":"multi_field",
+ "fields":{
+ "name":{
+ "type":"string",
+ "index":"analyzed",
+ "store":"yes"
+ },
+ "indexed":{
+ "type":"string",
+ "index":"analyzed"
+ },
+ "not_indexed":{
+ "type":"string",
+ "index":"no",
+ "store":"yes"
+ },
+ "test1" : {
+ "type":"string",
+ "index":"analyzed",
+ "store" : "yes",
+ "fielddata" : {
+ "loading" : "eager"
+ }
+ },
+ "test2" : {
+ "type" : "token_count",
+ "store" : "yes",
+ "index" : "not_analyzed",
+ "analyzer" : "simple"
+ }
+ }
+ },
+ "object1":{
+ "properties":{
+ "multi1":{
+ "type":"multi_field",
+ "fields":{
+ "multi1":{
+ "type":"date"
+ },
+ "string":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-fields.json b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-fields.json
new file mode 100644
index 0000000..b116665
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-fields.json
@@ -0,0 +1,50 @@
+{
+ "person": {
+ "properties": {
+ "name": {
+ "type": "string",
+ "index": "analyzed",
+ "store": "yes",
+ "fields": {
+ "indexed": {
+ "type": "string",
+ "index": "analyzed",
+ "store": "no"
+ },
+ "not_indexed": {
+ "type": "string",
+ "index": "no",
+ "store": "yes"
+ },
+ "test1": {
+ "type": "string",
+ "index": "analyzed",
+ "store": "yes",
+ "fielddata": {
+ "loading": "eager"
+ }
+ },
+ "test2": {
+ "type": "token_count",
+ "index": "not_analyzed",
+ "store": "yes",
+ "analyzer": "simple"
+ }
+ }
+ },
+ "object1": {
+ "properties": {
+ "multi1": {
+ "type": "date",
+ "fields": {
+ "string": {
+ "type": "string",
+ "index": "not_analyzed"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/nested/NestedMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/nested/NestedMappingTests.java
new file mode 100644
index 0000000..6596463
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/nested/NestedMappingTests.java
@@ -0,0 +1,318 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.nested;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class NestedMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void emptyNested() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .nullField("nested1")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(1));
+
+ doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested").endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(1));
+ }
+
+ @Test
+ public void singleNested() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startObject("nested1").field("field1", "1").field("field2", "2").endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(2));
+ assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
+ assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("2"));
+
+ assertThat(doc.docs().get(1).get("field"), equalTo("value"));
+
+
+ doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").field("field2", "2").endObject()
+ .startObject().field("field1", "3").field("field2", "4").endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(3));
+ assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
+ assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("3"));
+ assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("4"));
+ assertThat(doc.docs().get(1).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
+ assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("2"));
+
+ assertThat(doc.docs().get(2).get("field"), equalTo("value"));
+ }
+
+ @Test
+ public void multiNested() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested")
+ .endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested1Mapper.nested().isIncludeInRoot(), equalTo(false));
+ ObjectMapper nested2Mapper = docMapper.objectMappers().get("nested1.nested2");
+ assertThat(nested2Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
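+ // Nested docs are indexed as a Lucene block: children precede their parents, so the root doc comes last (index 6).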
+ assertThat(doc.docs().size(), equalTo(7));
+ assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+ assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(0).get("field"), nullValue());
+ assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(1).get("field"), nullValue());
+ assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
+ assertThat(doc.docs().get(2).get("nested1.nested2.field2"), nullValue());
+ assertThat(doc.docs().get(2).get("field"), nullValue());
+ assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+ assertThat(doc.docs().get(3).get("field"), nullValue());
+ assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(4).get("field"), nullValue());
+ assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(5).get("nested1.nested2.field2"), nullValue());
+ assertThat(doc.docs().get(5).get("field"), nullValue());
+ assertThat(doc.docs().get(6).get("field"), equalTo("value"));
+ assertThat(doc.docs().get(6).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(6).get("nested1.nested2.field2"), nullValue());
+ }
+
+ @Test
+ public void multiObjectAndNested1() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested").field("include_in_parent", true)
+ .endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested1Mapper.nested().isIncludeInRoot(), equalTo(false));
+ ObjectMapper nested2Mapper = docMapper.objectMappers().get("nested1.nested2");
+ assertThat(nested2Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
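+ // include_in_parent on nested2 copies its fields into the enclosing nested1 docs (indexes 2 and 5), but not into the root doc.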
+ assertThat(doc.docs().size(), equalTo(7));
+ assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+ assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(0).get("field"), nullValue());
+ assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(1).get("field"), nullValue());
+ assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
+ assertThat(doc.docs().get(2).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(2).get("field"), nullValue());
+ assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+ assertThat(doc.docs().get(3).get("field"), nullValue());
+ assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(4).get("field"), nullValue());
+ assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(5).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(5).get("field"), nullValue());
+ assertThat(doc.docs().get(6).get("field"), equalTo("value"));
+ assertThat(doc.docs().get(6).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(6).get("nested1.nested2.field2"), nullValue());
+ }
+
+ @Test
+ public void multiObjectAndNested2() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").field("include_in_parent", true).startObject("properties")
+ .startObject("nested2").field("type", "nested").field("include_in_parent", true)
+ .endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInParent(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInRoot(), equalTo(false));
+ ObjectMapper nested2Mapper = docMapper.objectMappers().get("nested1.nested2");
+ assertThat(nested2Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
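+ // With include_in_parent on both levels the values propagate all the way up: the root doc ends up with two copies of field1 and four of field2.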
+ assertThat(doc.docs().size(), equalTo(7));
+ assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+ assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(0).get("field"), nullValue());
+ assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(1).get("field"), nullValue());
+ assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
+ assertThat(doc.docs().get(2).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(2).get("field"), nullValue());
+ assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+ assertThat(doc.docs().get(3).get("field"), nullValue());
+ assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(4).get("field"), nullValue());
+ assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(5).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(5).get("field"), nullValue());
+ assertThat(doc.docs().get(6).get("field"), equalTo("value"));
+ assertThat(doc.docs().get(6).getFields("nested1.field1").length, equalTo(2));
+ assertThat(doc.docs().get(6).getFields("nested1.nested2.field2").length, equalTo(4));
+ }
+
+ @Test
+ public void multiRootAndNested1() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested").field("include_in_root", true)
+ .endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested1Mapper.nested().isIncludeInRoot(), equalTo(false));
+ ObjectMapper nested2Mapper = docMapper.objectMappers().get("nested1.nested2");
+ assertThat(nested2Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(true));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
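+ // include_in_root copies the nested2 fields directly onto the root doc (four values) while skipping the intermediate nested1 docs.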
+ assertThat(doc.docs().size(), equalTo(7));
+ assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+ assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(0).get("field"), nullValue());
+ assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(1).get("field"), nullValue());
+ assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
+ assertThat(doc.docs().get(2).get("nested1.nested2.field2"), nullValue());
+ assertThat(doc.docs().get(2).get("field"), nullValue());
+ assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+ assertThat(doc.docs().get(3).get("field"), nullValue());
+ assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(4).get("field"), nullValue());
+ assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(5).get("nested1.nested2.field2"), nullValue());
+ assertThat(doc.docs().get(5).get("field"), nullValue());
+ assertThat(doc.docs().get(6).get("field"), equalTo("value"));
+ assertThat(doc.docs().get(6).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(6).getFields("nested1.nested2.field2").length, equalTo(4));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java b/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java
new file mode 100644
index 0000000..541a05d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.numeric;
+
+import org.apache.lucene.index.FieldInfo.DocValuesType;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.index.mapper.string.SimpleStringMappingTests;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class SimpleNumericTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNumericDetectionEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("numeric_detection", true)
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("s_long", "100")
+ .field("s_double", "100.0")
+ .endObject()
+ .bytes());
+
+ FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("s_long");
+ assertThat(mapper, instanceOf(LongFieldMapper.class));
+
+ mapper = defaultMapper.mappers().smartNameFieldMapper("s_double");
+ assertThat(mapper, instanceOf(DoubleFieldMapper.class));
+ }
+
+ @Test
+ public void testNumericDetectionDefault() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("s_long", "100")
+ .field("s_double", "100.0")
+ .endObject()
+ .bytes());
+
+ FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("s_long");
+ assertThat(mapper, instanceOf(StringFieldMapper.class));
+
+ mapper = defaultMapper.mappers().smartNameFieldMapper("s_double");
+ assertThat(mapper, instanceOf(StringFieldMapper.class));
+ }
+
+ @Test
+ public void testIgnoreMalformedOption() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "integer").field("ignore_malformed", true).endObject()
+ .startObject("field2").field("type", "integer").field("ignore_malformed", false).endObject()
+ .startObject("field3").field("type", "integer").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "a")
+ .field("field2", "1")
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("field1"), nullValue());
+ assertThat(doc.rootDoc().getField("field2"), notNullValue());
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field2", "a")
+ .endObject()
+ .bytes());
+ fail("expected a MapperParsingException");
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(NumberFormatException.class));
+ }
+
+ // Verify that ignore_malformed defaults to false
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field3", "a")
+ .endObject()
+ .bytes());
+ fail("expected a MapperParsingException");
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(NumberFormatException.class));
+ }
+
+ // Unless the global ignore_malformed option is set to true
+ Settings indexSettings = settingsBuilder().put("index.mapping.ignore_malformed", true).build();
+ defaultMapper = MapperTestUtils.newParser(indexSettings).parse(mapping);
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field3", "a")
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("field3"), nullValue());
+
+ // This should still throw an exception, since field2 is specifically set to ignore_malformed=false
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field2", "a")
+ .endObject()
+ .bytes());
+ fail("expected a MapperParsingException");
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(NumberFormatException.class));
+ }
+ }
+
+ @Test
+ public void testCoerceOption() throws Exception {
+ String[] nonFractionNumericFieldTypes = {"integer", "long", "short"};
+ // Test coercion policies on all non-fraction numeric types
+ for (String nonFractionNumericFieldType : nonFractionNumericFieldTypes) {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("noErrorNoCoerceField").field("type", nonFractionNumericFieldType).field("ignore_malformed", true)
+ .field("coerce", false).endObject()
+ .startObject("noErrorCoerceField").field("type", nonFractionNumericFieldType).field("ignore_malformed", true)
+ .field("coerce", true).endObject()
+ .startObject("errorDefaultCoerce").field("type", nonFractionNumericFieldType).field("ignore_malformed", false).endObject()
+ .startObject("errorNoCoerce").field("type", nonFractionNumericFieldType).field("ignore_malformed", false)
+ .field("coerce", false).endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ // Test numbers passed as strings
+ String invalidJsonNumberAsString = "1";
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("noErrorNoCoerceField", invalidJsonNumberAsString)
+ .field("noErrorCoerceField", invalidJsonNumberAsString)
+ .field("errorDefaultCoerce", invalidJsonNumberAsString)
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("noErrorNoCoerceField"), nullValue());
+ assertThat(doc.rootDoc().getField("noErrorCoerceField"), notNullValue());
+ // coerce defaults to true, so the string value is parsed into a number even though ignore_malformed is false
+ assertThat(doc.rootDoc().getField("errorDefaultCoerce"), notNullValue());
+
+ // Test the valid case of numbers passed as numbers
+ int validNumber = 1;
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("noErrorNoCoerceField", validNumber)
+ .field("noErrorCoerceField", validNumber)
+ .field("errorDefaultCoerce", validNumber)
+ .endObject()
+ .bytes());
+ assertEquals(validNumber, doc.rootDoc().getField("noErrorNoCoerceField").numericValue().intValue());
+ assertEquals(validNumber, doc.rootDoc().getField("noErrorCoerceField").numericValue().intValue());
+ assertEquals(validNumber, doc.rootDoc().getField("errorDefaultCoerce").numericValue().intValue());
+
+ // Test the valid case of negative numbers passed as numbers
+ int validNegativeNumber = -1;
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("noErrorNoCoerceField", validNegativeNumber)
+ .field("noErrorCoerceField", validNegativeNumber)
+ .field("errorDefaultCoerce", validNegativeNumber)
+ .endObject()
+ .bytes());
+ assertEquals(validNegativeNumber, doc.rootDoc().getField("noErrorNoCoerceField").numericValue().intValue());
+ assertEquals(validNegativeNumber, doc.rootDoc().getField("noErrorCoerceField").numericValue().intValue());
+ assertEquals(validNegativeNumber, doc.rootDoc().getField("errorDefaultCoerce").numericValue().intValue());
+
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("errorNoCoerce", invalidJsonNumberAsString)
+ .endObject()
+ .bytes());
+ fail("expected a MapperParsingException");
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+ }
+
+
+ // Test the questionable case of floats passed to integer fields
+ float invalidJsonForInteger = 1.9f;
+ int coercedFloatValue = 1; // coercion truncates the float toward zero instead of rounding
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("noErrorNoCoerceField", invalidJsonForInteger)
+ .field("noErrorCoerceField", invalidJsonForInteger)
+ .field("errorDefaultCoerce", invalidJsonForInteger)
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("noErrorNoCoerceField"), nullValue());
+ assertEquals(coercedFloatValue,doc.rootDoc().getField("noErrorCoerceField").numericValue().intValue());
+ //Default is ignore_malformed=true and coerce=true
+ assertEquals(coercedFloatValue,doc.rootDoc().getField("errorDefaultCoerce").numericValue().intValue());
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("errorNoCoerce", invalidJsonForInteger)
+ .endObject()
+ .bytes());
+ fail("expected a MapperParsingException");
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+ }
+ }
+ }
+
+ @Test
+ public void testDocValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("int")
+ .field("type", "integer")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("double")
+ .field("type", "double")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
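+ // Numeric doc values backed by fielddata use the BINARY doc values type in this version, which the assertions below verify.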
+ ParsedDocument parsedDoc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("int", "1234")
+ .field("double", "1234")
+ .endObject()
+ .bytes());
+ final Document doc = parsedDoc.rootDoc();
+ assertEquals(DocValuesType.BINARY, SimpleStringMappingTests.docValuesType(doc, "int"));
+ assertEquals(DocValuesType.BINARY, SimpleStringMappingTests.docValuesType(doc, "double"));
+ }
+
+ @Test
+ public void testDocValuesOnNested() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("nested")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("int")
+ .field("type", "integer")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("double")
+ .field("type", "double")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument parsedDoc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("nested")
+ .startObject()
+ .field("int", "1234")
+ .field("double", "1234")
+ .endObject()
+ .startObject()
+ .field("int", "-1")
+ .field("double", "-2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .bytes());
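+ // The root doc carries no nested fields, so it is skipped; only the block-joined nested docs hold the doc values.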
+ for (Document doc : parsedDoc.docs()) {
+ if (doc == parsedDoc.rootDoc()) {
+ continue;
+ }
+ assertEquals(DocValuesType.BINARY, SimpleStringMappingTests.docValuesType(doc, "nested.int"));
+ assertEquals(DocValuesType.BINARY, SimpleStringMappingTests.docValuesType(doc, "nested.double"));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/object/NullValueObjectMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/object/NullValueObjectMappingTests.java
new file mode 100644
index 0000000..4c732b7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/object/NullValueObjectMappingTests.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.object;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class NullValueObjectMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNullValueObject() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("obj1").field("type", "object").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("obj1").endObject()
+ .field("value1", "test1")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("value1"), equalTo("test1"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .nullField("obj1")
+ .field("value1", "test1")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("value1"), equalTo("test1"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("obj1").field("field", "value").endObject()
+ .field("value1", "test1")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("obj1.field"), equalTo("value"));
+ assertThat(doc.rootDoc().get("value1"), equalTo("test1"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java
new file mode 100644
index 0000000..b94a30c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.object;
+
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+/**
+ * Basic tests for object field mappings and malformed inner objects.
+ */
+public class SimpleObjectMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDifferentInnerObjectTokenFailure() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
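+ // the array maps "object" to an object in the first element and to a plain
+ // string in the second; this token mismatch must fail the parse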
+ try {
+ defaultMapper.parse("type", "1", new BytesArray(" {\n" +
+ " \"object\": {\n" +
+ " \"array\":[\n" +
+ " {\n" +
+ " \"object\": { \"value\": \"value\" }\n" +
+ " },\n" +
+ " {\n" +
+ " \"object\":\"value\"\n" +
+ " }\n" +
+ " ]\n" +
+ " },\n" +
+ " \"value\":\"value\"\n" +
+ " }"));
+ fail("expected a MapperParsingException");
+ } catch (MapperParsingException e) {
+ // all is well
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java
new file mode 100644
index 0000000..04497a4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.parent;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests for the {@code _parent} field mapping.
+ */
+public class ParentMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void parentNotMapped() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("_parent", "1122")
+ .field("x_field", "x_value")
+ .endObject()
+ .bytes()).type("type").id("1"));
+
+ // no _parent mapping, used as a simple field
+ assertThat(doc.parent(), nullValue());
+ assertThat(doc.rootDoc().get("_parent"), nullValue());
+ }
+
+ @Test
+ public void parentSetInDocNotExternally() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_parent").field("type", "p_type").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("_parent", "1122")
+ .field("x_field", "x_value")
+ .endObject()
+ .bytes()).type("type").id("1"));
+
+ assertThat(doc.parent(), equalTo("1122"));
+ assertThat(doc.rootDoc().get("_parent"), equalTo(Uid.createUid("p_type", "1122")));
+ }
+
+ @Test
+ public void parentNotSetInDocSetExternally() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_parent").field("type", "p_type").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
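+ // the parent id is only provided externally here, not inside the document source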
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("x_field", "x_value")
+ .endObject()
+ .bytes()).type("type").id("1").parent("1122"));
+
+ assertThat(doc.rootDoc().get("_parent"), equalTo(Uid.createUid("p_type", "1122")));
+ }
+
+ @Test
+ public void parentSetInDocSetExternally() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_parent").field("type", "p_type").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("_parent", "1122")
+ .field("x_field", "x_value")
+ .endObject()
+ .bytes()).type("type").id("1").parent("1122"));
+
+ assertThat(doc.rootDoc().get("_parent"), equalTo(Uid.createUid("p_type", "1122")));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/path/PathMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/path/PathMapperTests.java
new file mode 100644
index 0000000..e05f7cd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/path/PathMapperTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.path;
+
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests the {@code path} setting on object mappings and its effect on index names and full names.
+ */
+public class PathMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testPathMapping() throws IOException {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/path/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
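+ // index names: path "just_name" drops the object prefix, path "full" keeps it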
+ assertThat(docMapper.mappers().indexName("first1"), notNullValue());
+ assertThat(docMapper.mappers().indexName("name1.first1"), nullValue());
+ assertThat(docMapper.mappers().indexName("last1"), nullValue());
+ assertThat(docMapper.mappers().indexName("i_last_1"), notNullValue());
+ assertThat(docMapper.mappers().indexName("name1.last1"), nullValue());
+ assertThat(docMapper.mappers().indexName("name1.i_last_1"), nullValue());
+
+ assertThat(docMapper.mappers().indexName("first2"), nullValue());
+ assertThat(docMapper.mappers().indexName("name2.first2"), notNullValue());
+ assertThat(docMapper.mappers().indexName("last2"), nullValue());
+ assertThat(docMapper.mappers().indexName("i_last_2"), nullValue());
+ assertThat(docMapper.mappers().indexName("name2.i_last_2"), notNullValue());
+ assertThat(docMapper.mappers().indexName("name2.last2"), nullValue());
+
+ // test full name
+ assertThat(docMapper.mappers().fullName("first1"), nullValue());
+ assertThat(docMapper.mappers().fullName("name1.first1"), notNullValue());
+ assertThat(docMapper.mappers().fullName("last1"), nullValue());
+ assertThat(docMapper.mappers().fullName("i_last_1"), nullValue());
+ assertThat(docMapper.mappers().fullName("name1.last1"), notNullValue());
+ assertThat(docMapper.mappers().fullName("name1.i_last_1"), nullValue());
+
+ assertThat(docMapper.mappers().fullName("first2"), nullValue());
+ assertThat(docMapper.mappers().fullName("name2.first2"), notNullValue());
+ assertThat(docMapper.mappers().fullName("last2"), nullValue());
+ assertThat(docMapper.mappers().fullName("i_last_2"), nullValue());
+ assertThat(docMapper.mappers().fullName("name2.i_last_2"), nullValue());
+ assertThat(docMapper.mappers().fullName("name2.last2"), notNullValue());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/path/test-mapping.json b/src/test/java/org/elasticsearch/index/mapper/path/test-mapping.json
new file mode 100644
index 0000000..200a451
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/path/test-mapping.json
@@ -0,0 +1,32 @@
+{
+ "person":{
+ "properties":{
+ "name1":{
+ "type":"object",
+ "path":"just_name",
+ "properties":{
+ "first1":{
+ "type":"string"
+ },
+ "last1":{
+ "type":"string",
+ "index_name":"i_last_1"
+ }
+ }
+ },
+ "name2":{
+ "type":"object",
+ "path":"full",
+ "properties":{
+ "first2":{
+ "type":"string"
+ },
+ "last2":{
+ "type":"string",
+ "index_name":"i_last_2"
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java
new file mode 100644
index 0000000..1793256
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.routing;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for the {@code _routing} field mapping.
+ */
+public class RoutingTypeMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleRoutingMapperTests() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes()).type("type").id("1").routing("routing_value"));
+
+ assertThat(doc.rootDoc().get("_routing"), equalTo("routing_value"));
+ assertThat(doc.rootDoc().get("field"), equalTo("value"));
+ }
+
+ @Test
+ public void testSetValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_routing")
+ .field("store", "no")
+ .field("index", "no")
+ .field("path", "route")
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.routingFieldMapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.routingFieldMapper().fieldType().indexed(), equalTo(false));
+ assertThat(docMapper.routingFieldMapper().path(), equalTo("route"));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testThatSerializationWorksCorrectlyForIndexField() throws Exception {
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_routing").field("store", "no").field("index", "no").endObject()
+ .endObject().endObject().string();
+ DocumentMapper enabledMapper = MapperTestUtils.newParser().parse(enabledMapping);
+
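+ // serialize the _routing mapper back to JSON and inspect the emitted settings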
+ XContentBuilder builder = JsonXContent.contentBuilder().startObject();
+ enabledMapper.routingFieldMapper().toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ assertThat(serializedMap, hasKey("_routing"));
+ assertThat(serializedMap.get("_routing"), instanceOf(Map.class));
+ Map<String, Object> routingConfiguration = (Map<String, Object>) serializedMap.get("_routing");
+ assertThat(routingConfiguration, hasKey("store"));
+ assertThat(routingConfiguration.get("store").toString(), is("false"));
+ assertThat(routingConfiguration, hasKey("index"));
+ assertThat(routingConfiguration.get("index").toString(), is("no"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java
new file mode 100644
index 0000000..79951e4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.simple;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.index.mapper.MapperBuilders.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Basic end-to-end tests for parsing documents with simple mappings.
+ */
+public class SimpleMapperTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleMapper() throws Exception {
+ DocumentMapperParser mapperParser = MapperTestUtils.newParser();
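+ // build the mapping programmatically: person > name > first (stored, not indexed)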
+ DocumentMapper docMapper = doc("test",
+ rootObject("person")
+ .add(object("name").add(stringField("first").store(true).index(false)))
+ ).build(mapperParser);
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json"));
+ Document doc = docMapper.parse("person", "1", json).rootDoc();
+
+ assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
+ assertThat(docMapper.mappers().name("first").mapper().names().fullName(), equalTo("name.first"));
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ doc = docMapper.parse(json).rootDoc();
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ }
+
+ @Test
+ public void testParseToJsonAndParse() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ String builtMapping = docMapper.mappingSource().string();
+ // re-parse the serialized mapping and make sure it produces the same documents
+ DocumentMapper builtDocMapper = MapperTestUtils.newParser().parse(builtMapping);
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json"));
+ Document doc = builtDocMapper.parse(json).rootDoc();
+ assertThat(doc.get(docMapper.uidMapper().names().indexName()), equalTo(Uid.createUid("person", "1")));
+ assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ }
+
+ @Test
+ public void testSimpleParser() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat((String) docMapper.meta().get("param1"), equalTo("value1"));
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+ assertThat(doc.get(docMapper.uidMapper().names().indexName()), equalTo(Uid.createUid("person", "1")));
+ assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ }
+
+ @Test
+ public void testSimpleParserNoTypeNoId() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1-notype-noid.json"));
+ Document doc = docMapper.parse("person", "1", json).rootDoc();
+ assertThat(doc.get(docMapper.uidMapper().names().indexName()), equalTo(Uid.createUid("person", "1")));
+ assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ }
+
+ @Test
+ public void testAttributes() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ assertThat((String) docMapper.meta().get("param1"), equalTo("value1"));
+
+ String builtMapping = docMapper.mappingSource().string();
+ DocumentMapper builtDocMapper = MapperTestUtils.newParser().parse(builtMapping);
+ assertThat((String) builtDocMapper.meta().get("param1"), equalTo("value1"));
+ }
+
+ @Test
+ public void testNoDocumentSent() throws Exception {
+ DocumentMapperParser mapperParser = MapperTestUtils.newParser();
+ DocumentMapper docMapper = doc("test",
+ rootObject("person")
+ .add(object("name").add(stringField("first").store(true).index(false)))
+ ).build(mapperParser);
+
+ BytesReference json = new BytesArray("".getBytes(Charsets.UTF_8));
+ try {
+ docMapper.parse("person", "1", json).rootDoc();
+ fail("this point is never reached");
+ } catch (MapperParsingException e) {
+ assertThat(e.getMessage(), equalTo("failed to parse, document is empty"));
+ }
+ }
+
+ @Test
+ public void testTypeWrapperWithSetting() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ Settings settings = ImmutableSettings.settingsBuilder().put("index.mapping.allow_type_wrapper", true).build();
+ DocumentMapper docMapper = MapperTestUtils.newParser(settings).parse(mapping);
+
+ assertThat((String) docMapper.meta().get("param1"), equalTo("value1"));
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1-withtype.json"));
+ Document doc = docMapper.parse(json).rootDoc();
+ assertThat(doc.get(docMapper.uidMapper().names().indexName()), equalTo(Uid.createUid("person", "1")));
+ assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/simple/test-mapping.json b/src/test/java/org/elasticsearch/index/mapper/simple/test-mapping.json
new file mode 100644
index 0000000..d45d557
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/simple/test-mapping.json
@@ -0,0 +1,98 @@
+{
+ person:{
+ "_meta":{
+ "param1":"value1"
+ },
+ date_formats:["yyyy-MM-dd", "dd-MM-yyyy"],
+ dynamic:false,
+ enabled:true,
+ _id:{
+ name:"_id",
+ index_name:"_id"
+ },
+ _source:{
+ name:"_source"
+ },
+ _type:{
+ name:"_type"
+ },
+ _boost:{
+ name:"_boost",
+ null_value:2.0
+ },
+ properties:{
+ name:{
+ type:"object",
+ dynamic:false,
+ properties:{
+ first:{
+ type:"string",
+ store:"yes"
+ },
+ last:{
+ type:"string",
+ index:"not_analyzed"
+ }
+ }
+ },
+ address:{
+ type:"object",
+ properties:{
+ first:{
+ properties:{
+ location:{
+ type:"string",
+ store:"yes",
+ index_name:"firstLocation"
+ }
+ }
+ },
+ last:{
+ properties:{
+ location:{
+ type:"string"
+ }
+ }
+ }
+ }
+ },
+ age:{
+ type:"integer",
+ null_value:0
+ },
+ birthdate:{
+ type:"date",
+ format:"yyyy-MM-dd"
+ },
+ nerd:{
+ type:"boolean"
+ },
+ dogs:{
+ type:"string",
+ index_name:"dog"
+ },
+ complex:{
+ type:"object",
+ properties:{
+ value1:{
+ type:"string"
+ },
+ value2:{
+ type:"string"
+ }
+ }
+ },
+ complex2:{
+ type:"object",
+ properties:{
+ value1:{
+ type:"string"
+ },
+ value2:{
+ type:"string"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype-noid.json b/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype-noid.json
new file mode 100644
index 0000000..745617a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype-noid.json
@@ -0,0 +1,40 @@
+{
+ _boost:3.7,
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype.json b/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype.json
new file mode 100644
index 0000000..7f38d0d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype.json
@@ -0,0 +1,41 @@
+{
+ _boost:3.7,
+ _id:"1",
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/simple/test1-withtype.json b/src/test/java/org/elasticsearch/index/mapper/simple/test1-withtype.json
new file mode 100644
index 0000000..096554a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/simple/test1-withtype.json
@@ -0,0 +1,43 @@
+{
+ person:{
+ _boost:3.7,
+ _id:"1",
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/simple/test1.json b/src/test/java/org/elasticsearch/index/mapper/simple/test1.json
new file mode 100644
index 0000000..93507da
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/simple/test1.json
@@ -0,0 +1,41 @@
+{
+ _boost:3.7,
+ _id:"1",
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java
new file mode 100644
index 0000000..bb39993
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.size;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+public class SizeMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSizeEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1"));
+
+ assertThat(doc.rootDoc().getField("_size").fieldType().stored(), equalTo(false));
+ assertThat(doc.rootDoc().getField("_size").tokenStream(docMapper.indexAnalyzer()), notNullValue());
+ }
+
+ @Test
+ public void testSizeEnabledAndStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", true).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1"));
+
+ assertThat(doc.rootDoc().getField("_size").fieldType().stored(), equalTo(true));
+ assertThat(doc.rootDoc().getField("_size").tokenStream(docMapper.indexAnalyzer()), notNullValue());
+ }
+
+ @Test
+ public void testSizeDisabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1"));
+
+ assertThat(doc.rootDoc().getField("_size"), nullValue());
+ }
+
+ @Test
+ public void testSizeNotSet() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1"));
+
+ assertThat(doc.rootDoc().getField("_size"), nullValue());
+ }
+
+ @Test
+ public void testThatDisablingWorksWhenMerging() throws Exception {
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ DocumentMapper enabledMapper = MapperTestUtils.newParser().parse(enabledMapping);
+
+ String disabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapper disabledMapper = MapperTestUtils.newParser().parse(disabledMapping);
+
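+ // merging the disabled mapping into the enabled mapper must switch _size off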
+ enabledMapper.merge(disabledMapper, DocumentMapper.MergeFlags.mergeFlags().simulate(false));
+ assertThat(enabledMapper.SizeFieldMapper().enabled(), is(false));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java
new file mode 100644
index 0000000..e108122
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.source;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the {@code _source} compression settings.
+ */
+public class CompressSourceMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCompressDisabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("compress", false).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+ BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
+ assertThat(CompressorFactory.isCompressed(bytes.bytes, bytes.offset, bytes.length), equalTo(false));
+ }
+
+ @Test
+ public void testCompressEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("compress", true).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
+ assertThat(CompressorFactory.isCompressed(bytes.bytes, bytes.offset, bytes.length), equalTo(true));
+ }
+
+ @Test
+ public void testCompressThreshold() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("compress_threshold", "200b").endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .endObject().bytes());
+
+ BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
+ assertThat(CompressorFactory.isCompressed(bytes.bytes, bytes.offset, bytes.length), equalTo(false));
+
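+ // a larger document crosses the 200b threshold and must be stored compressed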
+ doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
+ .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
+ .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
+ .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
+ .endObject().bytes());
+
+ bytes = doc.rootDoc().getBinaryValue("_source");
+ assertThat(CompressorFactory.isCompressed(bytes.bytes, bytes.offset, bytes.length), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java
new file mode 100644
index 0000000..2a21f48
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.source;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@code _source} mapping defaults, formats, includes and default mappings.
+ */
+public class DefaultSourceMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNoFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").endObject()
+ .endObject().endObject().string();
+
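+ // without an explicit format, _source keeps the content type of the incoming document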
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON));
+
+ documentMapper = MapperTestUtils.newParser().parse(mapping);
+ doc = documentMapper.parse("type", "1", XContentFactory.smileBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.SMILE));
+ }
+
+ @Test
+ public void testJsonFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("format", "json").endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON));
+
+ documentMapper = MapperTestUtils.newParser().parse(mapping);
+ doc = documentMapper.parse("type", "1", XContentFactory.smileBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON));
+ }
+
+ @Test
+ public void testJsonFormatCompressed() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("format", "json").field("compress", true).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
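+ // the stored _source must be compressed and must decompress back to JSON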
+ assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true));
+ byte[] uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes();
+ assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON));
+
+ documentMapper = MapperTestUtils.newParser().parse(mapping);
+ doc = documentMapper.parse("type", "1", XContentFactory.smileBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true));
+ uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes();
+ assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON));
+ }
+
+ @Test
+ public void testIncludeExclude() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("includes", new String[]{"path1*"}).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = MapperTestUtils.newParser().parse(mapping);
+
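+ // only paths matching "path1*" are kept in the stored _source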
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("path1").field("field1", "value1").endObject()
+ .startObject("path2").field("field2", "value2").endObject()
+ .endObject().bytes());
+
+ IndexableField sourceField = doc.rootDoc().getField("_source");
+ Map<String, Object> sourceAsMap = XContentFactory.xContent(XContentType.JSON).createParser(new BytesArray(sourceField.binaryValue())).mapAndClose();
+ assertThat(sourceAsMap.containsKey("path1"), equalTo(true));
+ assertThat(sourceAsMap.containsKey("path2"), equalTo(false));
+ }
+
+ @Test
+ public void testDefaultMappingAndNoMapping() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse("my_type", null, defaultMapping);
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+ try {
+ // without a type name the default mapping alone must not be parseable
+ MapperTestUtils.newParser().parse(null, null, defaultMapping);
+ fail("expected a MapperParsingException");
+ } catch (MapperParsingException e) {
+ // all is well
+ }
+ try {
+ MapperTestUtils.newParser().parse(null, "{}", defaultMapping);
+ fail("expected a MapperParsingException");
+ } catch (MapperParsingException e) {
+ assertThat(e.getMessage(), equalTo("malformed mapping no root object found"));
+ }
+ }
+
+ @Test
+ public void testDefaultMappingAndWithMappingOverride() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type")
+ .startObject("_source").field("enabled", true).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse("my_type", mapping, defaultMapping);
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(true));
+ }
+
+ @Test
+ public void testDefaultMappingAndNoMappingWithMapperService() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ MapperService mapperService = MapperTestUtils.newMapperService();
+ mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedString(defaultMapping), true);
+
+ DocumentMapper mapper = mapperService.documentMapperWithAutoCreate("my_type");
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+ }
+
+ @Test
+ public void testDefaultMappingAndWithMappingOverrideWithMapperService() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ MapperService mapperService = MapperTestUtils.newMapperService();
+ mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedString(defaultMapping), true);
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type")
+ .startObject("_source").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ mapperService.merge("my_type", new CompressedString(mapping), true);
+
+ DocumentMapper mapper = mapperService.documentMapper("my_type");
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java
new file mode 100644
index 0000000..b10ef46
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java
@@ -0,0 +1,322 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.string;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.DocValuesType;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.IndexableFieldType;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.Mapper.BuilderContext;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for string field mapping options.
+ */
+public class SimpleStringMappingTests extends ElasticsearchTestCase {
+
+ private static final Settings DOC_VALUES_SETTINGS = ImmutableSettings.builder().put(FieldDataType.FORMAT_KEY, FieldDataType.DOC_VALUES_FORMAT_VALUE).build();
+
+ @Test
+ public void testLimit() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("ignore_above", 5).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field"), notNullValue());
+
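+ // exactly five characters: still within the ignore_above limit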
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "12345")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field"), notNullValue());
+
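+ // six characters exceed ignore_above=5, so the field is dropped entirely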
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "123456")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field"), nullValue());
+ }
+
+ private void assertDefaultAnalyzedFieldType(IndexableFieldType fieldType) {
+ assertThat(fieldType.omitNorms(), equalTo(false));
+ assertThat(fieldType.indexOptions(), equalTo(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS));
+ assertThat(fieldType.storeTermVectors(), equalTo(false));
+ assertThat(fieldType.storeTermVectorOffsets(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPositions(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPayloads(), equalTo(false));
+ }
+
+ // compares only the field type properties this test cares about
+ private void assertEquals(IndexableFieldType ft1, IndexableFieldType ft2) {
+ assertEquals(ft1.indexed(), ft2.indexed());
+ assertEquals(ft1.tokenized(), ft2.tokenized());
+ assertEquals(ft1.omitNorms(), ft2.omitNorms());
+ assertEquals(ft1.indexOptions(), ft2.indexOptions());
+ assertEquals(ft1.storeTermVectors(), ft2.storeTermVectors());
+ assertEquals(ft1.docValueType(), ft2.docValueType());
+ }
+
+ // serializes the mapping, re-parses it, and checks the resulting field type is unchanged
+ private void assertParseIdemPotent(IndexableFieldType expected, DocumentMapper mapper) throws Exception {
+ String mapping = mapper.toXContent(XContentFactory.jsonBuilder().startObject(), new ToXContent.MapParams(ImmutableMap.<String, String>of())).endObject().string();
+ mapper = MapperTestUtils.newParser().parse(mapping);
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "2345")
+ .endObject()
+ .bytes());
+ assertEquals(expected, doc.rootDoc().getField("field").fieldType());
+ }
+
+ @Test
+ public void testDefaultsForAnalyzed() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ IndexableFieldType fieldType = doc.rootDoc().getField("field").fieldType();
+ assertDefaultAnalyzedFieldType(fieldType);
+ assertParseIdemPotent(fieldType, defaultMapper);
+ }
+
+ @Test
+ public void testDefaultsForNotAnalyzed() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("index", "not_analyzed").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ IndexableFieldType fieldType = doc.rootDoc().getField("field").fieldType();
+ assertThat(fieldType.omitNorms(), equalTo(true));
+ assertThat(fieldType.indexOptions(), equalTo(FieldInfo.IndexOptions.DOCS_ONLY));
+ assertThat(fieldType.storeTermVectors(), equalTo(false));
+ assertThat(fieldType.storeTermVectorOffsets(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPositions(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPayloads(), equalTo(false));
+ assertParseIdemPotent(fieldType, defaultMapper);
+
+ // now test it explicitly set
+
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("index", "not_analyzed").startObject("norms").field("enabled", true).endObject().field("index_options", "freqs").endObject().endObject()
+ .endObject().endObject().string();
+
+ defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ fieldType = doc.rootDoc().getField("field").fieldType();
+ assertThat(fieldType.omitNorms(), equalTo(false));
+ assertThat(fieldType.indexOptions(), equalTo(FieldInfo.IndexOptions.DOCS_AND_FREQS));
+ assertThat(fieldType.storeTermVectors(), equalTo(false));
+ assertThat(fieldType.storeTermVectorOffsets(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPositions(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPayloads(), equalTo(false));
+ assertParseIdemPotent(fieldType, defaultMapper);
+
+ // also test the deprecated omit_norms
+
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("index", "not_analyzed").field("omit_norms", false).endObject().endObject()
+ .endObject().endObject().string();
+
+ defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ fieldType = doc.rootDoc().getField("field").fieldType();
+ assertThat(fieldType.omitNorms(), equalTo(false));
+ assertParseIdemPotent(fieldType, defaultMapper);
+ }
+
+ @Test
+ public void testTermVectors() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1")
+ .field("type", "string")
+ .field("term_vector", "no")
+ .endObject()
+ .startObject("field2")
+ .field("type", "string")
+ .field("term_vector", "yes")
+ .endObject()
+ .startObject("field3")
+ .field("type", "string")
+ .field("term_vector", "with_offsets")
+ .endObject()
+ .startObject("field4")
+ .field("type", "string")
+ .field("term_vector", "with_positions")
+ .endObject()
+ .startObject("field5")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets")
+ .endObject()
+ .startObject("field6")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
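+ // one field per term_vector option, from "no" up to "with_positions_offsets_payloads"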
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "1234")
+ .field("field2", "1234")
+ .field("field3", "1234")
+ .field("field4", "1234")
+ .field("field5", "1234")
+ .field("field6", "1234")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectors(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorOffsets(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorPositions(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectorOffsets(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectorPositions(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectorOffsets(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectorPositions(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectorOffsets(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectorPositions(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectorOffsets(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectorPositions(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorOffsets(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorPositions(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorPayloads(), equalTo(true));
+ }
+
+ @Test
+ public void testDocValues() throws Exception {
+ // doc values only work on non-analyzed content
+ final BuilderContext ctx = new BuilderContext(null, new ContentPath(1));
+ try {
+ new StringFieldMapper.Builder("anything").fieldDataSettings(DOC_VALUES_SETTINGS).build(ctx);
+ fail();
+ } catch (Exception e) { /* OK */ }
+ new StringFieldMapper.Builder("anything").tokenized(false).fieldDataSettings(DOC_VALUES_SETTINGS).build(ctx);
+ new StringFieldMapper.Builder("anything").index(false).fieldDataSettings(DOC_VALUES_SETTINGS).build(ctx);
+
+ assertFalse(new StringFieldMapper.Builder("anything").index(false).build(ctx).hasDocValues());
+ assertTrue(new StringFieldMapper.Builder("anything").index(false).fieldDataSettings(DOC_VALUES_SETTINGS).build(ctx).hasDocValues());
+ assertTrue(new StringFieldMapper.Builder("anything").index(false).docValues(true).build(ctx).hasDocValues());
+
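+ // an analyzed string with fielddata format "fst" must get no doc values,
+ // while a not_analyzed string with format "doc_values" gets SORTED_SET ones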
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("str1")
+ .field("type", "string")
+ .startObject("fielddata")
+ .field("format", "fst")
+ .endObject()
+ .endObject()
+ .startObject("str2")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
+
+ ParsedDocument parsedDoc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("str1", "1234")
+ .field("str2", "1234")
+ .endObject()
+ .bytes());
+ final Document doc = parsedDoc.rootDoc();
+ assertEquals(null, docValuesType(doc, "str1"));
+ assertEquals(DocValuesType.SORTED_SET, docValuesType(doc, "str2"));
+ }
+
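+ /** Returns the doc values type of the first field with the given name that carries doc values, or null. */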
+ public static DocValuesType docValuesType(Document document, String fieldName) {
+ for (IndexableField field : document.getFields(fieldName)) {
+ if (field.fieldType().docValueType() != null) {
+ return field.fieldType().docValueType();
+ }
+ }
+ return null;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java
new file mode 100644
index 0000000..4cac4ce
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.timestamp;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Locale;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for the {@code _timestamp} field mapper: defaults, explicit settings,
+ * merge behavior and XContent serialization.
+ */
+public class TimestampMappingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleDisabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1").timestamp(1));
+
+ assertThat(doc.rootDoc().getField("_timestamp"), equalTo(null));
+ }
+
+ @Test
+ public void testEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", "yes").field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1").timestamp(1));
+
+ assertThat(doc.rootDoc().getField("_timestamp").fieldType().stored(), equalTo(true));
+ assertThat(doc.rootDoc().getField("_timestamp").fieldType().indexed(), equalTo(true));
+ assertThat(doc.rootDoc().getField("_timestamp").tokenStream(docMapper.indexAnalyzer()), notNullValue());
+ }
+
+ @Test
+ public void testDefaultValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.timestampFieldMapper().enabled(), equalTo(TimestampFieldMapper.Defaults.ENABLED.enabled));
+ assertThat(docMapper.timestampFieldMapper().fieldType().stored(), equalTo(TimestampFieldMapper.Defaults.FIELD_TYPE.stored()));
+ assertThat(docMapper.timestampFieldMapper().fieldType().indexed(), equalTo(TimestampFieldMapper.Defaults.FIELD_TYPE.indexed()));
+ assertThat(docMapper.timestampFieldMapper().path(), equalTo(null));
+ assertThat(docMapper.timestampFieldMapper().dateTimeFormatter().format(), equalTo(TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT));
+ }
+
+
+ @Test
+ public void testSetValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes").field("store", "yes").field("index", "no")
+ .field("path", "timestamp").field("format", "year")
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.timestampFieldMapper().enabled(), equalTo(true));
+ assertThat(docMapper.timestampFieldMapper().fieldType().stored(), equalTo(true));
+ assertThat(docMapper.timestampFieldMapper().fieldType().indexed(), equalTo(false));
+ assertThat(docMapper.timestampFieldMapper().path(), equalTo("timestamp"));
+ assertThat(docMapper.timestampFieldMapper().dateTimeFormatter().format(), equalTo("year"));
+ }
+
+ @Test
+ public void testThatDisablingDuringMergeIsWorking() throws Exception {
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper enabledMapper = MapperTestUtils.newParser().parse(enabledMapping);
+
+ String disabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapper disabledMapper = MapperTestUtils.newParser().parse(disabledMapping);
+
+ enabledMapper.merge(disabledMapper, DocumentMapper.MergeFlags.mergeFlags().simulate(false));
+
+ assertThat(enabledMapper.timestampFieldMapper().enabled(), is(false));
+ }
+
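+ // a disabled _timestamp mapper must serialize to an empty object, omitting
+ // the non-default store setting supplied in the mapping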
+ @Test
+ public void testThatDisablingFieldMapperDoesNotReturnAnyUselessInfo() throws Exception {
+ boolean invertedStoreSetting = !TimestampFieldMapper.Defaults.FIELD_TYPE.stored();
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", false).field("store", inversedStoreSetting).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse(mapping);
+
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ mapper.timestampFieldMapper().toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+
+ assertThat(builder.string(), is(String.format(Locale.ROOT, "{\"%s\":{}}", TimestampFieldMapper.NAME)));
+ }
+
+ @Test // issue 3174
+ public void testThatSerializationWorksCorrectlyForIndexField() throws Exception {
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").field("index", "no").endObject()
+ .endObject().endObject().string();
+ DocumentMapper enabledMapper = MapperTestUtils.newParser().parse(enabledMapping);
+
+ XContentBuilder builder = JsonXContent.contentBuilder().startObject();
+ enabledMapper.timestampFieldMapper().toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ assertThat(serializedMap, hasKey("_timestamp"));
+ assertThat(serializedMap.get("_timestamp"), instanceOf(Map.class));
+ Map<String, Object> timestampConfiguration = (Map<String, Object>) serializedMap.get("_timestamp");
+ assertThat(timestampConfiguration, hasKey("store"));
+ assertThat(timestampConfiguration.get("store").toString(), is("true"));
+ assertThat(timestampConfiguration, hasKey("index"));
+ assertThat(timestampConfiguration.get("index").toString(), is("no"));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
new file mode 100644
index 0000000..73193ba
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.ttl;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
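+/**
+ * Tests for the {@code _ttl} field mapper: defaults, explicit settings and
+ * merge behavior.
+ */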
+public class TTLMappingTests extends ElasticsearchTestCase {
+ @Test
+ public void testSimpleDisabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1").ttl(Long.MAX_VALUE));
+
+ assertThat(doc.rootDoc().getField("_ttl"), equalTo(null));
+ }
+
+ @Test
+ public void testEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl").field("enabled", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1").ttl(Long.MAX_VALUE));
+
+ assertThat(doc.rootDoc().getField("_ttl").fieldType().stored(), equalTo(true));
+ assertThat(doc.rootDoc().getField("_ttl").fieldType().indexed(), equalTo(true));
+ assertThat(doc.rootDoc().getField("_ttl").tokenStream(docMapper.indexAnalyzer()), notNullValue());
+ }
+
+ @Test
+ public void testDefaultValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.TTLFieldMapper().enabled(), equalTo(TTLFieldMapper.Defaults.ENABLED_STATE.enabled));
+ assertThat(docMapper.TTLFieldMapper().fieldType().stored(), equalTo(TTLFieldMapper.Defaults.TTL_FIELD_TYPE.stored()));
+ assertThat(docMapper.TTLFieldMapper().fieldType().indexed(), equalTo(TTLFieldMapper.Defaults.TTL_FIELD_TYPE.indexed()));
+ }
+
+
+ @Test
+ public void testSetValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("enabled", "yes").field("store", "no").field("index", "no")
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(docMapper.TTLFieldMapper().enabled(), equalTo(true));
+ assertThat(docMapper.TTLFieldMapper().fieldType().stored(), equalTo(false));
+ assertThat(docMapper.TTLFieldMapper().fieldType().indexed(), equalTo(false));
+ }
+
+ @Test
+ public void testThatEnablingTTLFieldOnMergeWorks() throws Exception {
+ String mappingWithoutTtl = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ String mappingWithTtl = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("enabled", "yes").field("store", "no").field("index", "no")
+ .endObject()
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapperWithoutTtl = MapperTestUtils.newParser().parse(mappingWithoutTtl);
+ DocumentMapper mapperWithTtl = MapperTestUtils.newParser().parse(mappingWithTtl);
+
+ DocumentMapper.MergeFlags mergeFlags = DocumentMapper.MergeFlags.mergeFlags().simulate(false);
+ DocumentMapper.MergeResult mergeResult = mapperWithoutTtl.merge(mapperWithTtl, mergeFlags);
+
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(mapperWithoutTtl.TTLFieldMapper().enabled(), equalTo(true));
+ }
+
+ @Test
+ public void testThatChangingTTLKeepsMapperEnabled() throws Exception {
+ String mappingWithTtl = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("enabled", "yes")
+ .endObject()
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("default", "1w")
+ .endObject()
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper initialMapper = MapperTestUtils.newParser().parse(mappingWithTtl);
+ DocumentMapper updatedMapper = MapperTestUtils.newParser().parse(updatedMapping);
+
+ DocumentMapper.MergeFlags mergeFlags = DocumentMapper.MergeFlags.mergeFlags().simulate(false);
+ DocumentMapper.MergeResult mergeResult = initialMapper.merge(updatedMapper, mergeFlags);
+
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseDocumentTypeLevelsTests.java b/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseDocumentTypeLevelsTests.java
new file mode 100644
index 0000000..56d1a96
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseDocumentTypeLevelsTests.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.typelevels;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests how document parsing handles the optional type-level wrapper object
+ * and fields or objects that share the type's name.
+ */
+public class ParseDocumentTypeLevelsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNoLevel() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testTypeLevel() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testNoLevelWithFieldTypeAsValue() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("type", "value_type")
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type"), equalTo("value_type"));
+ assertThat(doc.rootDoc().get("test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testTypeLevelWithFieldTypeAsValue() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("type", "value_type")
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type"), equalTo("value_type"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testNoLevelWithFieldTypeAsObject() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type").field("type_field", "type_value").endObject()
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject()
+ .bytes());
+
+ // the object named after the type is parsed as a regular object field here, so its content is indexed under the "type." prefix while the sibling fields stay at the root level
+ assertThat(doc.rootDoc().get("type.type_field"), equalTo("type_value"));
+ assertThat(doc.rootDoc().get("test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("test2"), equalTo("value2"));
+ }
+
+ @Test
+ public void testTypeLevelWithFieldTypeAsObject() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .startObject("type").field("type_field", "type_value").endObject()
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type.type_field"), equalTo("type_value"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testNoLevelWithFieldTypeAsValueNotFirst() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .field("type", "value_type")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type"), equalTo("value_type"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testTypeLevelWithFieldTypeAsValueNotFirst() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("test1", "value1")
+ .field("type", "value_type")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type"), equalTo("value_type"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testNoLevelWithFieldTypeAsObjectNotFirst() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("test1", "value1")
+ .startObject("type").field("type_field", "type_value").endObject()
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject()
+ .bytes());
+
+ // when the object named after the type is not the first field, it is not mistaken for a type-level wrapper
+ assertThat(doc.rootDoc().get("type.type_field"), equalTo("type_value"));
+ assertThat(doc.rootDoc().get("test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testTypeLevelWithFieldTypeAsObjectNotFirst() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("test1", "value1")
+ .startObject("type").field("type_field", "type_value").endObject()
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type.type_field"), equalTo("type_value"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java b/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java
new file mode 100644
index 0000000..e2c7a93
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.typelevels;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Verifies that a mapping parses to the same result whether or not the type
+ * name wraps the mapping definition.
+ */
+public class ParseMappingTypeLevelTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testTypeLevel() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = MapperTestUtils.newParser().parse("type", mapping);
+ assertThat(mapper.type(), equalTo("type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+
+ mapper = MapperTestUtils.newParser().parse(mapping);
+ assertThat(mapper.type(), equalTo("type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java b/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java
new file mode 100644
index 0000000..de3456b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.merge.policy;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.store.distributor.LeastUsedDistributor;
+import org.elasticsearch.index.store.ram.RamDirectoryService;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+public class MergePolicySettingsTest {
+
+ protected final ShardId shardId = new ShardId(new Index("index"), 1);
+
+ @Test
+ public void testCompoundFileSettings() throws IOException {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+
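+ // index.compound_format accepts booleans (including case-insensitive strings)
+ // and ratios in [0.0, 1.0]: true maps to a noCFSRatio of 1.0, false to 0.0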
+ assertThat(new TieredMergePolicyProvider(createStore(EMPTY_SETTINGS), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(true)), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(0.5)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.5));
+ assertThat(new TieredMergePolicyProvider(createStore(build(1.0)), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build("true")), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build("True")), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build("False")), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build("false")), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(false)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(0)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(0.0)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(EMPTY_SETTINGS), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(true)), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(0.5)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.5));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(1.0)), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build("true")), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build("True")), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build("False")), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build("false")), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(false)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(0)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(0.0)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ assertThat(new LogDocMergePolicyProvider(createStore(EMPTY_SETTINGS), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(true)), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(0.5)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.5));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(1.0)), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build("true")), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build("True")), service).newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build("False")), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build("false")), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(false)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(0)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(0.0)), service).newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ }
+
+ @Test
+ public void testInvalidValue() throws IOException {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
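+ // ratios outside [0.0, 1.0] and strings that are neither a boolean nor a
+ // number must be rejected with an ElasticsearchIllegalArgumentException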
+ try {
+ new LogDocMergePolicyProvider(createStore(build(-0.1)), service).newMergePolicy().getNoCFSRatio();
+ assertThat("exception expected", false);
+ } catch (ElasticsearchIllegalArgumentException ex) {
+
+ }
+ try {
+ new LogDocMergePolicyProvider(createStore(build(1.1)), service).newMergePolicy().getNoCFSRatio();
+ assertThat("exception expected", false);
+ } catch (ElasticsearchIllegalArgumentException ex) {
+
+ }
+ try {
+ new LogDocMergePolicyProvider(createStore(build("Falsch")), service).newMergePolicy().getNoCFSRatio();
+ assertThat("exception expected", false);
+ } catch (ElasticsearchIllegalArgumentException ex) {
+
+ }
+
+ }
+
+ @Test
+ public void testUpdateSettings() throws IOException {
+ {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ TieredMergePolicyProvider mp = new TieredMergePolicyProvider(createStore(EMPTY_SETTINGS), service);
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ service.refreshSettings(build(1.0));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+
+ service.refreshSettings(build(0.1));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.1));
+
+ service.refreshSettings(build(0.0));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ }
+
+ {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ LogByteSizeMergePolicyProvider mp = new LogByteSizeMergePolicyProvider(createStore(EMPTY_SETTINGS), service);
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ service.refreshSettings(build(1.0));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+
+ service.refreshSettings(build(0.1));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.1));
+
+ service.refreshSettings(build(0.0));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ }
+
+ {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ LogDocMergePolicyProvider mp = new LogDocMergePolicyProvider(createStore(EMPTY_SETTINGS), service);
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ service.refreshSettings(build(1.0));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(1.0));
+
+ service.refreshSettings(build(0.1));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.1));
+
+ service.refreshSettings(build(0.0));
+ assertThat(mp.newMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ }
+ }
+
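+ // helpers that build settings with index.compound_format set to the given value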
+ public Settings build(String value) {
+ return ImmutableSettings.builder().put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT, value).build();
+ }
+
+ public Settings build(double value) {
+ return ImmutableSettings.builder().put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT, value).build();
+ }
+
+ public Settings build(int value) {
+ return ImmutableSettings.builder().put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT, value).build();
+ }
+
+ public Settings build(boolean value) {
+ return ImmutableSettings.builder().put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT, value).build();
+ }
+
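+ /** Creates a RAM-backed store for the test shard so merge policy providers can be constructed. */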
+ protected Store createStore(Settings settings) throws IOException {
+ DirectoryService directoryService = new RamDirectoryService(shardId, EMPTY_SETTINGS);
+ return new Store(shardId, settings, null, null, directoryService, new LeastUsedDistributor(directoryService));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java
new file mode 100644
index 0000000..b7acdf8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.geo.builders.EnvelopeBuilder;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+public class GeoShapeQueryBuilderTests extends ElasticsearchTestCase {
+
+ @Test // see #3878
+ public void testThatXContentSerializationInsideOfArrayWorks() throws Exception {
+ EnvelopeBuilder envelopeBuilder = ShapeBuilder.newEnvelope().topLeft(0, 0).bottomRight(10, 10);
+ GeoShapeQueryBuilder geoQuery = QueryBuilders.geoShapeQuery("searchGeometry", envelopeBuilder);
+ JsonXContent.contentBuilder().startArray().value(geoQuery).endArray();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterCachingTests.java b/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterCachingTests.java
new file mode 100644
index 0000000..1dc979f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterCachingTests.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.lucene.search.AndFilter;
+import org.elasticsearch.common.lucene.search.CachedFilter;
+import org.elasticsearch.common.lucene.search.NoCacheFilter;
+import org.elasticsearch.common.lucene.search.XBooleanFilter;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MapperServiceModule;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.search.child.TestSearchContext;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Verifies which parsed filters end up wrapped in CachedFilter and which in
+ * NoCacheFilter.
+ */
+public class IndexQueryParserFilterCachingTests extends ElasticsearchTestCase {
+
+ private static Injector injector;
+
+ private static IndexQueryParserService queryParser;
+
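+ // wires up just enough modules to obtain an IndexQueryParserService, using a
+ // weighted filter cache so that caching decisions become observable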
+ @BeforeClass
+ public static void setupQueryParser() throws IOException {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.cache.filter.type", "weighted")
+ .build();
+ Index index = new Index("test");
+ injector = new ModulesBuilder().add(
+ new CacheRecyclerModule(settings),
+ new CodecModule(settings),
+ new SettingsModule(settings),
+ new ThreadPoolModule(settings),
+ new IndicesQueriesModule(),
+ new ScriptModule(settings),
+ new MapperServiceModule(),
+ new IndexSettingsModule(index, settings),
+ new IndexCacheModule(settings),
+ new AnalysisModule(settings),
+ new IndexEngineModule(settings),
+ new SimilarityModule(settings),
+ new IndexQueryParserModule(settings),
+ new IndexNameModule(index),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/query/mapping.json");
+ injector.getInstance(MapperService.class).merge("person", new CompressedString(mapping), true);
+ String childMapping = copyToStringFromClasspath("/org/elasticsearch/index/query/child-mapping.json");
+ injector.getInstance(MapperService.class).merge("child", new CompressedString(childMapping), true);
+ injector.getInstance(MapperService.class).documentMapper("person").parse(new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/query/data.json")));
+ queryParser = injector.getInstance(IndexQueryParserService.class);
+ }
+
+ @AfterClass
+ public static void close() {
+ injector.getInstance(ThreadPool.class).shutdownNow();
+ queryParser = null;
+ injector = null;
+ }
+
+ private IndexQueryParserService queryParser() throws IOException {
+ return this.queryParser;
+ }
+
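+ /** Encodes a long into Lucene's prefix-coded term bytes at the given precision shift. */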
+ private BytesRef longToPrefixCoded(long val, int shift) {
+ BytesRef bytesRef = new BytesRef();
+ NumericUtils.longToPrefixCoded(val, shift, bytesRef);
+ return bytesRef;
+ }
+
+
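+ // filters over unrounded "now" date ranges must never be cached, so the first
+ // three queries parse to boolean filters containing a NoCacheFilter clause,
+ // while the explicitly cached and rounded variants parse to a CachedFilter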
+ @Test
+ public void testNoFilterParsing() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_in_boolean.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery)parsedQuery).getFilter(), instanceOf(XBooleanFilter.class));
+ assertThat(((XBooleanFilter)((ConstantScoreQuery)parsedQuery).getFilter()).clauses().get(1).getFilter(), instanceOf(NoCacheFilter.class));
+ assertThat(((XBooleanFilter)((ConstantScoreQuery)parsedQuery).getFilter()).clauses().size(), is(2));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_in_boolean_cached_now.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery)parsedQuery).getFilter(), instanceOf(XBooleanFilter.class));
+ assertThat(((XBooleanFilter)((ConstantScoreQuery)parsedQuery).getFilter()).clauses().get(1).getFilter(), instanceOf(NoCacheFilter.class));
+ assertThat(((XBooleanFilter)((ConstantScoreQuery)parsedQuery).getFilter()).clauses().size(), is(2));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(XBooleanFilter.class));
+ assertThat(((XBooleanFilter) ((ConstantScoreQuery) parsedQuery).getFilter()).clauses().get(1).getFilter(), instanceOf(NoCacheFilter.class));
+ assertThat(((XBooleanFilter) ((ConstantScoreQuery) parsedQuery).getFilter()).clauses().size(), is(2));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_in_boolean_cached.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(CachedFilter.class));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_in_boolean_cached_now_with_rounding.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(CachedFilter.class));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now_with_rounding.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(CachedFilter.class));
+
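+ // has_child requires an active SearchContext; its filter must not be cached,
+ // and a cached and-filter wrapping it falls back to a plain AndFilter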
+ try {
+ SearchContext.setCurrent(new TestSearchContext());
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/has-child.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(NoCacheFilter.class));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter-cache.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(CachedFilter.class));
+
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/has-child-in-and-filter-cached.json");
+ parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(AndFilter.class));
+ } finally {
+ SearchContext.removeCurrent();
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java
new file mode 100644
index 0000000..45dc8cf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java
@@ -0,0 +1,2298 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.*;
+import org.apache.lucene.sandbox.queries.FuzzyLikeThisQuery;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.spans.*;
+import org.apache.lucene.spatial.prefix.IntersectsPrefixTreeFilter;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.lucene.search.*;
+import org.elasticsearch.common.lucene.search.function.BoostScoreFunction;
+import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MapperServiceModule;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.search.NumericRangeFieldDataFilter;
+import org.elasticsearch.index.search.geo.GeoDistanceFilter;
+import org.elasticsearch.index.search.geo.GeoPolygonFilter;
+import org.elasticsearch.index.search.geo.InMemoryGeoBoundingBoxFilter;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.hamcrest.Matchers;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.RegexpFlag.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.factorFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Unit tests for {@link IndexQueryParserService}: queries and filters are parsed both
+ * from builders and from JSON fixtures on the classpath, and the resulting Lucene
+ * queries and filters are asserted on directly.
+ */
+public class SimpleIndexQueryParserTests extends ElasticsearchTestCase {
+
+ private static Injector injector;
+
+ private static IndexQueryParserService queryParser;
+
+ @BeforeClass
+ public static void setupQueryParser() throws IOException {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.cache.filter.type", "none")
+ .build();
+ Index index = new Index("test");
+ injector = new ModulesBuilder().add(
+ new CacheRecyclerModule(settings),
+ new CodecModule(settings),
+ new SettingsModule(settings),
+ new ThreadPoolModule(settings),
+ new IndicesQueriesModule(),
+ new ScriptModule(settings),
+ new MapperServiceModule(),
+ new IndexSettingsModule(index, settings),
+ new IndexCacheModule(settings),
+ new AnalysisModule(settings),
+ new IndexEngineModule(settings),
+ new SimilarityModule(settings),
+ new IndexQueryParserModule(settings),
+ new IndexNameModule(index),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
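+                        // no cluster is running in this unit test: satisfy ClusterService with a null provider and use a no-op circuit breaker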
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+
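+        // register the "person" mapping and parse a sample document so dynamically mapped fields (e.g. the numeric "age") are known to the parser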
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/query/mapping.json");
+ injector.getInstance(MapperService.class).merge("person", new CompressedString(mapping), true);
+ injector.getInstance(MapperService.class).documentMapper("person").parse(new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/query/data.json")));
+ queryParser = injector.getInstance(IndexQueryParserService.class);
+ }
+
+ @AfterClass
+ public static void close() {
+ injector.getInstance(ThreadPool.class).shutdownNow();
+ queryParser = null;
+ injector = null;
+ }
+
+    private IndexQueryParserService queryParser() {
+        return queryParser;
+    }
+
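+    // encodes a long at the given precision shift into Lucene's prefix-coded term bytes, the form numeric terms take in the index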
+ private BytesRef longToPrefixCoded(long val, int shift) {
+ BytesRef bytesRef = new BytesRef();
+ NumericUtils.longToPrefixCoded(val, shift, bytesRef);
+ return bytesRef;
+ }
+
+ @Test
+ public void testQueryStringBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(queryString("test").defaultField("content").phraseSlop(1)).query();
+
+ assertThat(parsedQuery, instanceOf(TermQuery.class));
+ TermQuery termQuery = (TermQuery) parsedQuery;
+ assertThat(termQuery.getTerm(), equalTo(new Term("content", "test")));
+ }
+
+ @Test
+ public void testQueryString() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(TermQuery.class));
+ TermQuery termQuery = (TermQuery) parsedQuery;
+ assertThat(termQuery.getTerm(), equalTo(new Term("content", "test")));
+ }
+
+ @Test
+ public void testQueryStringBoostsBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ QueryStringQueryBuilder builder = queryString("field:boosted^2");
+ Query parsedQuery = queryParser.parse(builder).query();
+ assertThat(parsedQuery, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) parsedQuery).getTerm(), equalTo(new Term("field", "boosted")));
+ assertThat(parsedQuery.getBoost(), equalTo(2.0f));
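+        // a boost set on the builder multiplies with the boost parsed from the query string itself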
+ builder.boost(2.0f);
+ parsedQuery = queryParser.parse(builder).query();
+ assertThat(parsedQuery.getBoost(), equalTo(4.0f));
+
+ builder = queryString("((field:boosted^2) AND (field:foo^1.5))^3");
+ parsedQuery = queryParser.parse(builder).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getTerm(), equalTo(new Term("field", "boosted")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getBoost(), equalTo(2.0f));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getTerm(), equalTo(new Term("field", "foo")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getBoost(), equalTo(1.5f));
+ assertThat(parsedQuery.getBoost(), equalTo(3.0f));
+ builder.boost(2.0f);
+ parsedQuery = queryParser.parse(builder).query();
+ assertThat(parsedQuery.getBoost(), equalTo(6.0f));
+ }
+
+ @Test
+ public void testQueryStringFields1Builder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(queryString("test").field("content").field("name").useDisMax(false)).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery bQuery = (BooleanQuery) parsedQuery;
+ assertThat(bQuery.clauses().size(), equalTo(2));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getTerm(), equalTo(new Term("content", "test")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getTerm(), equalTo(new Term("name", "test")));
+ }
+
+ @Test
+ public void testQueryStringFields1() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-fields1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery bQuery = (BooleanQuery) parsedQuery;
+ assertThat(bQuery.clauses().size(), equalTo(2));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getTerm(), equalTo(new Term("content", "test")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getTerm(), equalTo(new Term("name", "test")));
+ }
+
+ @Test
+ public void testQueryStringFieldsMatch() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-fields-match.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery bQuery = (BooleanQuery) parsedQuery;
+ assertThat(bQuery.clauses().size(), equalTo(2));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getTerm(), equalTo(new Term("name.first", "test")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getTerm(), equalTo(new Term("name.last", "test")));
+ }
+
+ @Test
+ public void testQueryStringFields2Builder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(queryString("test").field("content").field("name").useDisMax(true)).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ List<Query> disjuncts = disMaxQuery.getDisjuncts();
+ assertThat(((TermQuery) disjuncts.get(0)).getTerm(), equalTo(new Term("content", "test")));
+ assertThat(((TermQuery) disjuncts.get(1)).getTerm(), equalTo(new Term("name", "test")));
+ }
+
+ @Test
+ public void testQueryStringFields2() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-fields2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ List<Query> disjuncts = disMaxQuery.getDisjuncts();
+ assertThat(((TermQuery) disjuncts.get(0)).getTerm(), equalTo(new Term("content", "test")));
+ assertThat(((TermQuery) disjuncts.get(1)).getTerm(), equalTo(new Term("name", "test")));
+ }
+
+ @Test
+ public void testQueryStringFields3Builder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(queryString("test").field("content", 2.2f).field("name").useDisMax(true)).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ List<Query> disjuncts = disMaxQuery.getDisjuncts();
+ assertThat(((TermQuery) disjuncts.get(0)).getTerm(), equalTo(new Term("content", "test")));
+ assertThat((double) disjuncts.get(0).getBoost(), closeTo(2.2, 0.01));
+ assertThat(((TermQuery) disjuncts.get(1)).getTerm(), equalTo(new Term("name", "test")));
+ assertThat((double) disjuncts.get(1).getBoost(), closeTo(1, 0.01));
+ }
+
+ @Test
+ public void testQueryStringFields3() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-fields3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ List<Query> disjuncts = disMaxQuery.getDisjuncts();
+ assertThat(((TermQuery) disjuncts.get(0)).getTerm(), equalTo(new Term("content", "test")));
+ assertThat((double) disjuncts.get(0).getBoost(), closeTo(2.2, 0.01));
+ assertThat(((TermQuery) disjuncts.get(1)).getTerm(), equalTo(new Term("name", "test")));
+ assertThat((double) disjuncts.get(1).getBoost(), closeTo(1, 0.01));
+ }
+
+ @Test
+ public void testMatchAllBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(matchAllQuery().boost(1.2f)).query();
+ assertThat(parsedQuery, instanceOf(MatchAllDocsQuery.class));
+ MatchAllDocsQuery matchAllDocsQuery = (MatchAllDocsQuery) parsedQuery;
+ assertThat((double) matchAllDocsQuery.getBoost(), closeTo(1.2, 0.01));
+ }
+
+ @Test
+ public void testMatchAll() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/matchAll.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(MatchAllDocsQuery.class));
+ MatchAllDocsQuery matchAllDocsQuery = (MatchAllDocsQuery) parsedQuery;
+ assertThat((double) matchAllDocsQuery.getBoost(), closeTo(1.2, 0.01));
+ }
+
+ @Test
+ public void testMatchAllEmpty1() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/match_all_empty1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, equalTo(Queries.newMatchAllQuery()));
+ assertThat(parsedQuery, not(sameInstance(Queries.newMatchAllQuery())));
+ }
+
+ @Test
+ public void testMatchAllEmpty2() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/match_all_empty2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, equalTo(Queries.newMatchAllQuery()));
+ assertThat(parsedQuery, not(sameInstance(Queries.newMatchAllQuery())));
+    }
+
+ @Test
+ public void testStarColonStar() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/starColonStar.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ Filter internalFilter = constantScoreQuery.getFilter();
+ assertThat(internalFilter, instanceOf(MatchAllDocsFilter.class));
+ }
+
+ @Test
+ public void testDisMaxBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(disMaxQuery().boost(1.2f).tieBreaker(0.7f).add(termQuery("name.first", "first")).add(termQuery("name.last", "last"))).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ assertThat((double) disjunctionMaxQuery.getBoost(), closeTo(1.2, 0.01));
+
+ List<Query> disjuncts = disjunctionMaxQuery.getDisjuncts();
+ assertThat(disjuncts.size(), equalTo(2));
+
+ Query firstQ = disjuncts.get(0);
+ assertThat(firstQ, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) firstQ).getTerm(), equalTo(new Term("name.first", "first")));
+
+        Query secondQ = disjuncts.get(1);
+        assertThat(secondQ, instanceOf(TermQuery.class));
+        assertThat(((TermQuery) secondQ).getTerm(), equalTo(new Term("name.last", "last")));
+ }
+
+ @Test
+ public void testDisMax() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/disMax.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ assertThat((double) disjunctionMaxQuery.getBoost(), closeTo(1.2, 0.01));
+
+ List<Query> disjuncts = disjunctionMaxQuery.getDisjuncts();
+ assertThat(disjuncts.size(), equalTo(2));
+
+ Query firstQ = disjuncts.get(0);
+ assertThat(firstQ, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) firstQ).getTerm(), equalTo(new Term("name.first", "first")));
+
+        Query secondQ = disjuncts.get(1);
+        assertThat(secondQ, instanceOf(TermQuery.class));
+        assertThat(((TermQuery) secondQ).getTerm(), equalTo(new Term("name.last", "last")));
+ }
+
+ @Test
+ public void testDisMax2() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/disMax2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+
+ List<Query> disjuncts = disjunctionMaxQuery.getDisjuncts();
+ assertThat(disjuncts.size(), equalTo(1));
+
+ PrefixQuery firstQ = (PrefixQuery) disjuncts.get(0);
+        // name.first is a plain string field, so the prefix term is used as-is
+ assertThat(firstQ.getPrefix(), equalTo(new Term("name.first", "sh")));
+ assertThat((double) firstQ.getBoost(), closeTo(1.2, 0.00001));
+ }
+
+ @Test
+ public void testTermQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(termQuery("age", 34).buildAsBytes()).query();
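+        // age is dynamically mapped as numeric, so the term query is rewritten to a single-point numeric range (min == max, both inclusive)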
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fieldQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(fieldQuery.getMin().intValue(), equalTo(34));
+ assertThat(fieldQuery.getMax().intValue(), equalTo(34));
+ assertThat(fieldQuery.includesMax(), equalTo(true));
+ assertThat(fieldQuery.includesMin(), equalTo(true));
+ }
+
+ @Test
+ public void testTermQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fieldQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(fieldQuery.getMin().intValue(), equalTo(34));
+ assertThat(fieldQuery.getMax().intValue(), equalTo(34));
+ assertThat(fieldQuery.includesMax(), equalTo(true));
+ assertThat(fieldQuery.includesMin(), equalTo(true));
+ }
+
+ @Test
+ public void testFuzzyQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(fuzzyQuery("name.first", "sh").buildAsBytes()).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery;
+ assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testFuzzyQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fuzzy.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery;
+ assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testFuzzyQueryWithFieldsBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(fuzzyQuery("name.first", "sh").fuzziness(Fuzziness.fromSimilarity(0.1f)).prefixLength(1).boost(2.0f).buildAsBytes()).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery;
+ assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh")));
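+        // the similarity-based fuzziness is converted into an equivalent edit distance for the two-character term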
+ assertThat(fuzzyQuery.getMaxEdits(), equalTo(FuzzyQuery.floatToEdits(0.1f, "sh".length())));
+ assertThat(fuzzyQuery.getPrefixLength(), equalTo(1));
+ assertThat(fuzzyQuery.getBoost(), equalTo(2.0f));
+ }
+
+ @Test
+ public void testFuzzyQueryWithFields() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fuzzy-with-fields.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery;
+ assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh")));
+ assertThat(fuzzyQuery.getMaxEdits(), equalTo(FuzzyQuery.floatToEdits(0.1f, "sh".length())));
+ assertThat(fuzzyQuery.getPrefixLength(), equalTo(1));
+ assertThat(fuzzyQuery.getBoost(), equalTo(2.0f));
+ }
+
+ @Test
+ public void testFuzzyQueryWithFields2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fuzzy-with-fields2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fuzzyQuery = (NumericRangeQuery) parsedQuery;
+        assertThat(fuzzyQuery.getMin().longValue(), equalTo(7L));
+        assertThat(fuzzyQuery.getMax().longValue(), equalTo(17L));
+ }
+
+ @Test
+ public void testTermWithBoostQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(termQuery("age", 34).boost(2.0f)).query();
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fieldQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(fieldQuery.getMin().intValue(), equalTo(34));
+ assertThat(fieldQuery.getMax().intValue(), equalTo(34));
+ assertThat(fieldQuery.includesMax(), equalTo(true));
+ assertThat(fieldQuery.includesMin(), equalTo(true));
+ assertThat((double) fieldQuery.getBoost(), closeTo(2.0, 0.01));
+ }
+
+ @Test
+ public void testTermWithBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-with-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fieldQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(fieldQuery.getMin().intValue(), equalTo(34));
+ assertThat(fieldQuery.getMax().intValue(), equalTo(34));
+ assertThat(fieldQuery.includesMax(), equalTo(true));
+ assertThat(fieldQuery.includesMin(), equalTo(true));
+ assertThat((double) fieldQuery.getBoost(), closeTo(2.0, 0.01));
+ }
+
+ @Test
+ public void testPrefixQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(prefixQuery("name.first", "sh")).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+        // name.first is a plain string field, so the prefix term is used as-is
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testPrefixQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+        // name.first is a plain string field, so the prefix term is used as-is
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testPrefixBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+        // name.first is a plain string field, so the prefix term is used as-is
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ assertThat((double) prefixQuery.getBoost(), closeTo(1.2, 0.00001));
+ }
+
+ @Test
+ public void testPrefixFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), prefixFilter("name.first", "sh"))).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ PrefixFilter prefixFilter = (PrefixFilter) filteredQuery.getFilter();
+ assertThat(prefixFilter.getPrefix(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testPrefixFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ PrefixFilter prefixFilter = (PrefixFilter) filteredQuery.getFilter();
+ assertThat(prefixFilter.getPrefix(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testPrefixNamedFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery.query();
+ PrefixFilter prefixFilter = (PrefixFilter) filteredQuery.getFilter();
+ assertThat(prefixFilter.getPrefix(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testPrefixQueryBoostQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(prefixQuery("name.first", "sh").boost(2.0f)).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ assertThat((double) prefixQuery.getBoost(), closeTo(2.0, 0.01));
+ }
+
+ @Test
+ public void testPrefixQueryBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix-with-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ assertThat((double) prefixQuery.getBoost(), closeTo(2.0, 0.01));
+ }
+
+ @Test
+ public void testPrefixQueryWithUnknownField() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(prefixQuery("unknown", "sh")).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("unknown", "sh")));
+ assertThat(prefixQuery.getRewriteMethod(), notNullValue());
+ }
+
+ @Test
+ public void testRegexpQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(regexpQuery("name.first", "s.*y")).query();
+ assertThat(parsedQuery, instanceOf(RegexpQuery.class));
+ RegexpQuery regexpQuery = (RegexpQuery) parsedQuery;
+ assertThat(regexpQuery.getField(), equalTo("name.first"));
+ }
+
+ @Test
+ public void testRegexpQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(RegexpQuery.class));
+ RegexpQuery regexpQuery = (RegexpQuery) parsedQuery;
+ assertThat(regexpQuery.getField(), equalTo("name.first"));
+ }
+
+ @Test
+ public void testRegexpFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery).getFilter();
+ assertThat(filter, instanceOf(RegexpFilter.class));
+ RegexpFilter regexpFilter = (RegexpFilter) filter;
+ assertThat(regexpFilter.field(), equalTo("name.first"));
+ assertThat(regexpFilter.regexp(), equalTo("s.*y"));
+ }
+
+ @Test
+ public void testNamedRegexpFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery.query()).getFilter();
+ assertThat(filter, instanceOf(RegexpFilter.class));
+ RegexpFilter regexpFilter = (RegexpFilter) filter;
+ assertThat(regexpFilter.field(), equalTo("name.first"));
+ assertThat(regexpFilter.regexp(), equalTo("s.*y"));
+ }
+
+ @Test
+ public void testRegexpWithFlagsFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-filter-flags.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery.query()).getFilter();
+ assertThat(filter, instanceOf(RegexpFilter.class));
+ RegexpFilter regexpFilter = (RegexpFilter) filter;
+ assertThat(regexpFilter.field(), equalTo("name.first"));
+ assertThat(regexpFilter.regexp(), equalTo("s.*y"));
+ assertThat(regexpFilter.flags(), equalTo(INTERSECTION.value() | COMPLEMENT.value() | EMPTY.value()));
+ }
+
+ @Test
+ public void testNamedAndCachedRegexpWithFlagsFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery.query()).getFilter();
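+        // the explicit _cache_key wraps the filter, so its cache entry is addressed by "key" rather than by the filter itself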
+ assertThat(filter, instanceOf(CacheKeyFilter.Wrapper.class));
+ CacheKeyFilter.Wrapper wrapper = (CacheKeyFilter.Wrapper) filter;
+ assertThat(new BytesRef(wrapper.cacheKey().bytes()).utf8ToString(), equalTo("key"));
+ assertThat(wrapper.wrappedFilter(), instanceOf(RegexpFilter.class));
+ RegexpFilter regexpFilter = (RegexpFilter) wrapper.wrappedFilter();
+ assertThat(regexpFilter.field(), equalTo("name.first"));
+ assertThat(regexpFilter.regexp(), equalTo("s.*y"));
+ assertThat(regexpFilter.flags(), equalTo(INTERSECTION.value() | COMPLEMENT.value() | EMPTY.value()));
+ }
+
+ @Test
+ public void testRegexpBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(RegexpQuery.class));
+ RegexpQuery regexpQuery = (RegexpQuery) parsedQuery;
+ assertThat(regexpQuery.getField(), equalTo("name.first"));
+ assertThat(regexpQuery.getBoost(), equalTo(1.2f));
+ }
+
+ @Test
+ public void testWildcardQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(wildcardQuery("name.first", "sh*")).query();
+ assertThat(parsedQuery, instanceOf(WildcardQuery.class));
+ WildcardQuery wildcardQuery = (WildcardQuery) parsedQuery;
+ assertThat(wildcardQuery.getTerm(), equalTo(new Term("name.first", "sh*")));
+ }
+
+ @Test
+ public void testWildcardQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/wildcard.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(WildcardQuery.class));
+ WildcardQuery wildcardQuery = (WildcardQuery) parsedQuery;
+ assertThat(wildcardQuery.getTerm(), equalTo(new Term("name.first", "sh*")));
+ }
+
+ @Test
+ public void testWildcardBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/wildcard-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(WildcardQuery.class));
+ WildcardQuery wildcardQuery = (WildcardQuery) parsedQuery;
+ assertThat(wildcardQuery.getTerm(), equalTo(new Term("name.first", "sh*")));
+ assertThat((double) wildcardQuery.getBoost(), closeTo(1.2, 0.00001));
+ }
+
+ @Test
+ public void testRangeQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(rangeQuery("age").from(23).to(54).includeLower(true).includeUpper(false)).query();
+        // age is registered as a numeric field from the test data, so the range parses to a NumericRangeQuery
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery rangeQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(rangeQuery.getField(), equalTo("age"));
+ assertThat(rangeQuery.getMin().intValue(), equalTo(23));
+ assertThat(rangeQuery.getMax().intValue(), equalTo(54));
+ assertThat(rangeQuery.includesMin(), equalTo(true));
+ assertThat(rangeQuery.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testRangeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/range.json");
+ Query parsedQuery = queryParser.parse(query).query();
+        // age is registered as a numeric field from the test data, so the range parses to a NumericRangeQuery
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery rangeQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(rangeQuery.getField(), equalTo("age"));
+ assertThat(rangeQuery.getMin().intValue(), equalTo(23));
+ assertThat(rangeQuery.getMax().intValue(), equalTo(54));
+ assertThat(rangeQuery.includesMin(), equalTo(true));
+ assertThat(rangeQuery.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testRange2Query() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/range2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+        // age is registered as a numeric field from the test data, so the range parses to a NumericRangeQuery
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery rangeQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(rangeQuery.getField(), equalTo("age"));
+ assertThat(rangeQuery.getMin().intValue(), equalTo(23));
+ assertThat(rangeQuery.getMax().intValue(), equalTo(54));
+ assertThat(rangeQuery.includesMin(), equalTo(true));
+ assertThat(rangeQuery.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testRangeFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), rangeFilter("age").from(23).to(54).includeLower(true).includeUpper(false))).query();
+        // age is registered as a numeric field from the test data, so the range filter parses to a NumericRangeFilter
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery).getFilter();
+ assertThat(filter, instanceOf(NumericRangeFilter.class));
+ NumericRangeFilter rangeFilter = (NumericRangeFilter) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getMin().intValue(), equalTo(23));
+ assertThat(rangeFilter.getMax().intValue(), equalTo(54));
+ assertThat(rangeFilter.includesMin(), equalTo(true));
+ assertThat(rangeFilter.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testRangeFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/range-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+        // age is registered as a numeric field from the test data, so the range filter parses to a NumericRangeFilter
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery).getFilter();
+ assertThat(filter, instanceOf(NumericRangeFilter.class));
+ NumericRangeFilter rangeFilter = (NumericRangeFilter) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getMin().intValue(), equalTo(23));
+ assertThat(rangeFilter.getMax().intValue(), equalTo(54));
+ assertThat(rangeFilter.includesMin(), equalTo(true));
+ assertThat(rangeFilter.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testRangeNamedFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/range-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery.query()).getFilter();
+ assertThat(filter, instanceOf(NumericRangeFilter.class));
+ NumericRangeFilter rangeFilter = (NumericRangeFilter) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getMin().intValue(), equalTo(23));
+ assertThat(rangeFilter.getMax().intValue(), equalTo(54));
+ assertThat(rangeFilter.includesMin(), equalTo(true));
+ assertThat(rangeFilter.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testNumericRangeFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), numericRangeFilter("age").from(23).to(54).includeLower(true).includeUpper(false))).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery).getFilter();
+ assertThat(filter, instanceOf(NumericRangeFieldDataFilter.class));
+ NumericRangeFieldDataFilter<Number> rangeFilter = (NumericRangeFieldDataFilter<Number>) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getLowerVal().intValue(), equalTo(23));
+ assertThat(rangeFilter.getUpperVal().intValue(), equalTo(54));
+ assertThat(rangeFilter.isIncludeLower(), equalTo(true));
+ assertThat(rangeFilter.isIncludeUpper(), equalTo(false));
+ }
+
+ @Test
+ public void testRangeFilteredQueryBuilder_executionFieldData() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), rangeFilter("age").from(23).to(54).includeLower(true).includeUpper(false).setExecution("fielddata"))).query();
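+        // execution "fielddata" selects the field-data backed range filter instead of the index-based NumericRangeFilter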
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery).getFilter();
+ assertThat(filter, instanceOf(NumericRangeFieldDataFilter.class));
+ NumericRangeFieldDataFilter<Number> rangeFilter = (NumericRangeFieldDataFilter<Number>) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getLowerVal().intValue(), equalTo(23));
+ assertThat(rangeFilter.getUpperVal().intValue(), equalTo(54));
+ assertThat(rangeFilter.isIncludeLower(), equalTo(true));
+ assertThat(rangeFilter.isIncludeUpper(), equalTo(false));
+ }
+
+ @Test
+ public void testNumericRangeFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/numeric_range-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ Filter filter = ((XFilteredQuery) parsedQuery).getFilter();
+ assertThat(filter, instanceOf(NumericRangeFieldDataFilter.class));
+ NumericRangeFieldDataFilter<Number> rangeFilter = (NumericRangeFieldDataFilter<Number>) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getLowerVal().intValue(), equalTo(23));
+ assertThat(rangeFilter.getUpperVal().intValue(), equalTo(54));
+ assertThat(rangeFilter.isIncludeLower(), equalTo(true));
+ assertThat(rangeFilter.isIncludeUpper(), equalTo(false));
+ }
+
+ @Test
+ public void testBoolFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), boolFilter().must(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")).mustNot(termFilter("name.first", "shay2")).should(termFilter("name.first", "shay3")))).query();
+
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ XBooleanFilter booleanFilter = (XBooleanFilter) filteredQuery.getFilter();
+
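+        // clauses should come back in the order they were declared: the two musts, the must_not, then the should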
+ Iterator<FilterClause> iterator = booleanFilter.iterator();
+ assertThat(iterator.hasNext(), equalTo(true));
+ FilterClause clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.MUST));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay1")));
+
+ assertThat(iterator.hasNext(), equalTo(true));
+ clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.MUST));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay4")));
+
+ assertThat(iterator.hasNext(), equalTo(true));
+ clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay2")));
+
+ assertThat(iterator.hasNext(), equalTo(true));
+ clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay3")));
+
+ assertThat(iterator.hasNext(), equalTo(false));
+ }
+
+ @Test
+ public void testBoolFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/bool-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ XBooleanFilter booleanFilter = (XBooleanFilter) filteredQuery.getFilter();
+
+ Iterator<FilterClause> iterator = booleanFilter.iterator();
+ assertThat(iterator.hasNext(), equalTo(true));
+ FilterClause clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.MUST));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay1")));
+
+ assertThat(iterator.hasNext(), equalTo(true));
+ clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.MUST));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay4")));
+
+ assertThat(iterator.hasNext(), equalTo(true));
+ clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay2")));
+
+ assertThat(iterator.hasNext(), equalTo(true));
+ clause = iterator.next();
+ assertThat(clause.getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ assertThat(((TermFilter) clause.getFilter()).getTerm(), equalTo(new Term("name.first", "shay3")));
+
+ assertThat(iterator.hasNext(), equalTo(false));
+ }
+
+ @Test
+ public void testAndFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), andFilter(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")))).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+
+ AndFilter andFilter = (AndFilter) constantScoreQuery.getFilter();
+ assertThat(andFilter.filters().size(), equalTo(2));
+ assertThat(((TermFilter) andFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+ assertThat(((TermFilter) andFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testAndFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+
+ AndFilter andFilter = (AndFilter) filteredQuery.getFilter();
+ assertThat(andFilter.filters().size(), equalTo(2));
+ assertThat(((TermFilter) andFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+ assertThat(((TermFilter) andFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testAndNamedFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery.query();
+
+ AndFilter andFilter = (AndFilter) filteredQuery.getFilter();
+ assertThat(andFilter.filters().size(), equalTo(2));
+ assertThat(((TermFilter) andFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+ assertThat(((TermFilter) andFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testAndFilteredQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+
+ AndFilter andFilter = (AndFilter) filteredQuery.getFilter();
+ assertThat(andFilter.filters().size(), equalTo(2));
+ assertThat(((TermFilter) andFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+ assertThat(((TermFilter) andFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testOrFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), orFilter(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")))).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+
+        OrFilter orFilter = (OrFilter) constantScoreQuery.getFilter();
+        assertThat(orFilter.filters().size(), equalTo(2));
+        assertThat(((TermFilter) orFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+        assertThat(((TermFilter) orFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testOrFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/or-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+
+ OrFilter orFilter = (OrFilter) filteredQuery.getFilter();
+ assertThat(orFilter.filters().size(), equalTo(2));
+ assertThat(((TermFilter) orFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+ assertThat(((TermFilter) orFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testOrFilteredQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/or-filter2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+
+ OrFilter orFilter = (OrFilter) filteredQuery.getFilter();
+ assertThat(orFilter.filters().size(), equalTo(2));
+ assertThat(((TermFilter) orFilter.filters().get(0)).getTerm(), equalTo(new Term("name.first", "shay1")));
+ assertThat(((TermFilter) orFilter.filters().get(1)).getTerm(), equalTo(new Term("name.first", "shay4")));
+ }
+
+ @Test
+ public void testNotFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), notFilter(termFilter("name.first", "shay1")))).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+
+ NotFilter notFilter = (NotFilter) constantScoreQuery.getFilter();
+ assertThat(((TermFilter) notFilter.filter()).getTerm(), equalTo(new Term("name.first", "shay1")));
+ }
+
+ @Test
+ public void testNotFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/not-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+
+ NotFilter notFilter = (NotFilter) filteredQuery.getFilter();
+ assertThat(((TermFilter) notFilter.filter()).getTerm(), equalTo(new Term("name.first", "shay1")));
+ }
+
+ @Test
+ public void testNotFilteredQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/not-filter2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+
+ NotFilter notFilter = (NotFilter) filteredQuery.getFilter();
+ assertThat(((TermFilter) notFilter.filter()).getTerm(), equalTo(new Term("name.first", "shay1")));
+ }
+
+ @Test
+ public void testNotFilteredQuery3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/not-filter3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+
+ NotFilter notFilter = (NotFilter) filteredQuery.getFilter();
+ assertThat(((TermFilter) notFilter.filter()).getTerm(), equalTo(new Term("name.first", "shay1")));
+ }
+
+ @Test
+ public void testBoostingQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(boostingQuery().positive(termQuery("field1", "value1")).negative(termQuery("field1", "value2")).negativeBoost(0.2f)).query();
+ assertThat(parsedQuery, instanceOf(BoostingQuery.class));
+ }
+
+ @Test
+ public void testBoostingQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/boosting-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BoostingQuery.class));
+ }
+
+ @Test
+ public void testQueryStringFuzzyNumeric() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fuzzyQuery = (NumericRangeQuery) parsedQuery;
+        assertThat(fuzzyQuery.getMin().longValue(), equalTo(12L));
+        assertThat(fuzzyQuery.getMax().longValue(), equalTo(12L));
+ }
+
+ @Test
+ public void testBoolQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(boolQuery().must(termQuery("content", "test1")).must(termQuery("content", "test4")).mustNot(termQuery("content", "test2")).should(termQuery("content", "test3"))).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(4));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("content", "test1")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.MUST));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("content", "test4")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.MUST));
+
+ assertThat(((TermQuery) clauses[2].getQuery()).getTerm(), equalTo(new Term("content", "test2")));
+ assertThat(clauses[2].getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
+
+ assertThat(((TermQuery) clauses[3].getQuery()).getTerm(), equalTo(new Term("content", "test3")));
+ assertThat(clauses[3].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testBoolQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/bool.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(4));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("content", "test1")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.MUST));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("content", "test4")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.MUST));
+
+ assertThat(((TermQuery) clauses[2].getQuery()).getTerm(), equalTo(new Term("content", "test2")));
+ assertThat(clauses[2].getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
+
+ assertThat(((TermQuery) clauses[3].getQuery()).getTerm(), equalTo(new Term("content", "test3")));
+ assertThat(clauses[3].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testTermsQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(termsQuery("name.first", Lists.newArrayList("shay", "test"))).query();
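+        // a terms query parses into a boolean query with one SHOULD clause per term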
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(2));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("name.first", "test")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testTermsQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/terms-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(2));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("name.first", "test")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testInQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(termsQuery("name.first", Lists.newArrayList("test1", "test2", "test3"))).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(3));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("name.first", "test1")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("name.first", "test2")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+
+ assertThat(((TermQuery) clauses[2].getQuery()).getTerm(), equalTo(new Term("name.first", "test3")));
+ assertThat(clauses[2].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), termFilter("name.last", "banon"))).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ assertThat(((TermFilter) filteredQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/filtered-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ assertThat(((TermFilter) filteredQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testFilteredQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/filtered-query2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ assertThat(((TermFilter) filteredQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testFilteredQuery3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/filtered-query3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+
+ Filter filter = filteredQuery.getFilter();
+ assertThat(filter, instanceOf(NumericRangeFilter.class));
+ NumericRangeFilter rangeFilter = (NumericRangeFilter) filter;
+ assertThat(rangeFilter.getField(), equalTo("age"));
+ assertThat(rangeFilter.getMin().intValue(), equalTo(23));
+ assertThat(rangeFilter.getMax().intValue(), equalTo(54));
+ }
+
+ @Test
+ public void testFilteredQuery4() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/filtered-query4.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ WildcardQuery wildcardQuery = (WildcardQuery) filteredQuery.getQuery();
+ assertThat(wildcardQuery.getTerm(), equalTo(new Term("name.first", "sh*")));
+ assertThat((double) wildcardQuery.getBoost(), closeTo(1.1, 0.001));
+
+ assertThat(((TermFilter) filteredQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testLimitFilter() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/limit-filter.json");
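+ // Presumed fixture shape, inferred from the assertions below:
+ // { "filtered" : { "query" : { "term" : { "name.first" : "shay" } },
+ //                  "filter" : { "limit" : { "value" : 2 } } } }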
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(filteredQuery.getFilter(), instanceOf(LimitFilter.class));
+ assertThat(((LimitFilter) filteredQuery.getFilter()).getLimit(), equalTo(2));
+
+ assertThat(filteredQuery.getQuery(), instanceOf(TermQuery.class));
+ assertThat(((TermQuery) filteredQuery.getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ }
+
+ @Test
+ public void testTermFilterQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(filteredQuery.getFilter(), instanceOf(TermFilter.class));
+ TermFilter termFilter = (TermFilter) filteredQuery.getFilter();
+ assertThat(termFilter.getTerm().field(), equalTo("name.last"));
+ assertThat(termFilter.getTerm().text(), equalTo("banon"));
+ }
+
+ @Test
+ public void testTermNamedFilterQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-filter-named.json");
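+ // a filter carrying "_name" : "test" is registered under that name in ParsedQuery.namedFilters()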
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery.query();
+ assertThat(filteredQuery.getFilter(), instanceOf(TermFilter.class));
+ TermFilter termFilter = (TermFilter) filteredQuery.getFilter();
+ assertThat(termFilter.getTerm().field(), equalTo("name.last"));
+ assertThat(termFilter.getTerm().text(), equalTo("banon"));
+ }
+
+ @Test
+ public void testTermsFilterQueryBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), termsFilter("name.last", "banon", "kimchy"))).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(filteredQuery.getFilter(), instanceOf(XTermsFilter.class));
+ XTermsFilter termsFilter = (XTermsFilter) filteredQuery.getFilter();
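+ // the getTerms() assertions below are disabled; XTermsFilter presumably does not expose its terms for inspection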
+ //assertThat(termsFilter.getTerms().length, equalTo(2));
+ //assertThat(termsFilter.getTerms()[0].text(), equalTo("banon"));
+ }
+
+
+ @Test
+ public void testTermsFilterQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/terms-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ assertThat(filteredQuery.getFilter(), instanceOf(XTermsFilter.class));
+ XTermsFilter termsFilter = (XTermsFilter) filteredQuery.getFilter();
+ //assertThat(termsFilter.getTerms().length, equalTo(2));
+ //assertThat(termsFilter.getTerms()[0].text(), equalTo("banon"));
+ }
+
+ @Test
+ public void testTermsWithNameFilterQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/terms-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery.query();
+ assertThat(filteredQuery.getFilter(), instanceOf(XTermsFilter.class));
+ XTermsFilter termsFilter = (XTermsFilter) filteredQuery.getFilter();
+ //assertThat(termsFilter.getTerms().length, equalTo(2));
+ //assertThat(termsFilter.getTerms()[0].text(), equalTo("banon"));
+ }
+
+ @Test
+ public void testConstantScoreQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(constantScoreQuery(termFilter("name.last", "banon"))).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ assertThat(((TermFilter) constantScoreQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testConstantScoreQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/constantScore-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ assertThat(((TermFilter) constantScoreQuery.getFilter()).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ // Disabled since we need a current context to execute it...
+// @Test public void testCustomScoreQuery1() throws IOException {
+// IndexQueryParser queryParser = queryParser();
+// String query = copyToStringFromClasspath("/org/elasticsearch/index/query/custom_score1.json");
+// Query parsedQuery = queryParser.parse(query).query();
+// assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class));
+// FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery;
+// assertThat(((TermQuery) functionScoreQuery.getSubQuery()).getTerm(), equalTo(new Term("name.last", "banon")));
+// assertThat(functionScoreQuery.getFunction(), instanceOf(CustomScoreQueryParser.ScriptScoreFunction.class));
+// }
+
+ @Test
+ public void testCustomBoostFactorQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(customBoostFactorQuery(termQuery("name.last", "banon")).boostFactor(1.3f)).query();
+ assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class));
+ FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery;
+ assertThat(((TermQuery) functionScoreQuery.getSubQuery()).getTerm(), equalTo(new Term("name.last", "banon")));
+ assertThat((double) ((BoostScoreFunction) functionScoreQuery.getFunction()).getBoost(), closeTo(1.3, 0.001));
+ }
+
+
+ @Test
+ public void testCustomBoostFactorQueryBuilder_withFunctionScore() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(functionScoreQuery(termQuery("name.last", "banon"), factorFunction(1.3f))).query();
+ assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class));
+ FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery;
+ assertThat(((TermQuery) functionScoreQuery.getSubQuery()).getTerm(), equalTo(new Term("name.last", "banon")));
+ assertThat((double) ((BoostScoreFunction) functionScoreQuery.getFunction()).getBoost(), closeTo(1.3, 0.001));
+ }
+
+ @Test
+ public void testCustomBoostFactorQueryBuilder_withFunctionScoreWithoutQueryGiven() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(functionScoreQuery(factorFunction(1.3f))).query();
+ assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class));
+ FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery;
+ assertThat(functionScoreQuery.getSubQuery(), instanceOf(XConstantScoreQuery.class));
+ assertThat(((XConstantScoreQuery) functionScoreQuery.getSubQuery()).getFilter(), instanceOf(MatchAllDocsFilter.class));
+ assertThat((double) ((BoostScoreFunction) functionScoreQuery.getFunction()).getBoost(), closeTo(1.3, 0.001));
+ }
+
+
+ @Test
+ public void testCustomBoostFactorQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/custom-boost-factor-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class));
+ FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery;
+ assertThat(((TermQuery) functionScoreQuery.getSubQuery()).getTerm(), equalTo(new Term("name.last", "banon")));
+ assertThat((double) ((BoostScoreFunction) functionScoreQuery.getFunction()).getBoost(), closeTo(1.3, 0.001));
+ }
+
+ @Test
+ public void testSpanTermQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(spanTermQuery("age", 34)).query();
+ assertThat(parsedQuery, instanceOf(SpanTermQuery.class));
+ SpanTermQuery termQuery = (SpanTermQuery) parsedQuery;
+ // since age is automatically mapped as a numeric field, the term is encoded as a prefix-coded long
+ assertThat(termQuery.getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ }
+
+ @Test
+ public void testSpanTermQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanTerm.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanTermQuery.class));
+ SpanTermQuery termQuery = (SpanTermQuery) parsedQuery;
+ // since age is automatically mapped as a numeric field, the term is encoded as a prefix-coded long
+ assertThat(termQuery.getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ }
+
+ @Test
+ public void testSpanNotQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(spanNotQuery().include(spanTermQuery("age", 34)).exclude(spanTermQuery("age", 35))).query();
+ assertThat(parsedQuery, instanceOf(SpanNotQuery.class));
+ SpanNotQuery spanNotQuery = (SpanNotQuery) parsedQuery;
+ // since age is automatically mapped as a numeric field, the term is encoded as a prefix-coded long
+ assertThat(((SpanTermQuery) spanNotQuery.getInclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanNotQuery.getExclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ }
+
+ @Test
+ public void testSpanNotQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanNot.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanNotQuery.class));
+ SpanNotQuery spanNotQuery = (SpanNotQuery) parsedQuery;
+ // since age is automatically mapped as a numeric field, the term is encoded as a prefix-coded long
+ assertThat(((SpanTermQuery) spanNotQuery.getInclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanNotQuery.getExclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ }
+
+ @Test
+ public void testSpanFirstQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(spanFirstQuery(spanTermQuery("age", 34), 12)).query();
+ assertThat(parsedQuery, instanceOf(SpanFirstQuery.class));
+ SpanFirstQuery spanFirstQuery = (SpanFirstQuery) parsedQuery;
+ // since age is automatically mapped as a numeric field, the term is encoded as a prefix-coded long
+ assertThat(((SpanTermQuery) spanFirstQuery.getMatch()).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(spanFirstQuery.getEnd(), equalTo(12));
+ }
+
+ @Test
+ public void testSpanFirstQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanFirst.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanFirstQuery.class));
+ SpanFirstQuery spanFirstQuery = (SpanFirstQuery) parsedQuery;
+ // since age is automatically mapped as a numeric field, the term is encoded as a prefix-coded long
+ assertThat(((SpanTermQuery) spanFirstQuery.getMatch()).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(spanFirstQuery.getEnd(), equalTo(12));
+ }
+
+ @Test
+ public void testSpanNearQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(spanNearQuery().clause(spanTermQuery("age", 34)).clause(spanTermQuery("age", 35)).clause(spanTermQuery("age", 36)).slop(12).inOrder(false).collectPayloads(false)).query();
+ assertThat(parsedQuery, instanceOf(SpanNearQuery.class));
+ SpanNearQuery spanNearQuery = (SpanNearQuery) parsedQuery;
+ assertThat(spanNearQuery.getClauses().length, equalTo(3));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", longToPrefixCoded(36, 0))));
+ assertThat(spanNearQuery.isInOrder(), equalTo(false));
+ }
+
+ @Test
+ public void testSpanNearQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanNear.json");
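+ // Presumed fixture shape, inferred from the assertions below: { "span_near" : {
+ // "clauses" : [ three span_term clauses on "age" for 34, 35, 36 ], "slop" : 12, "in_order" : false } }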
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanNearQuery.class));
+ SpanNearQuery spanNearQuery = (SpanNearQuery) parsedQuery;
+ assertThat(spanNearQuery.getClauses().length, equalTo(3));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", longToPrefixCoded(36, 0))));
+ assertThat(spanNearQuery.isInOrder(), equalTo(false));
+ }
+
+ @Test
+ public void testFieldMaskingSpanQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanFieldMaskingTerm.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanNearQuery.class));
+ SpanNearQuery spanNearQuery = (SpanNearQuery) parsedQuery;
+ assertThat(spanNearQuery.getClauses().length, equalTo(3));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ assertThat(((SpanTermQuery) ((FieldMaskingSpanQuery) spanNearQuery.getClauses()[2]).getMaskedQuery()).getTerm(), equalTo(new Term("age_1", "36")));
+ assertThat(spanNearQuery.isInOrder(), equalTo(false));
+ }
+
+
+ @Test
+ public void testSpanOrQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(spanOrQuery().clause(spanTermQuery("age", 34)).clause(spanTermQuery("age", 35)).clause(spanTermQuery("age", 36))).query();
+ assertThat(parsedQuery, instanceOf(SpanOrQuery.class));
+ SpanOrQuery spanOrQuery = (SpanOrQuery) parsedQuery;
+ assertThat(spanOrQuery.getClauses().length, equalTo(3));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", longToPrefixCoded(36, 0))));
+ }
+
+ @Test
+ public void testSpanOrQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanOr.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanOrQuery.class));
+ SpanOrQuery spanOrQuery = (SpanOrQuery) parsedQuery;
+ assertThat(spanOrQuery.getClauses().length, equalTo(3));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", longToPrefixCoded(36, 0))));
+ }
+
+ @Test
+ public void testSpanOrQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanOr2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanOrQuery.class));
+ SpanOrQuery spanOrQuery = (SpanOrQuery) parsedQuery;
+ assertThat(spanOrQuery.getClauses().length, equalTo(3));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ assertThat(((SpanTermQuery) spanOrQuery.getClauses()[2]).getTerm(), equalTo(new Term("age", longToPrefixCoded(36, 0))));
+ }
+
+ @Test
+ public void testSpanMultiTermWildcardQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-wildcard.json");
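+ // Presumed fixture shape: { "span_multi" : { "match" :
+ // { "wildcard" : { "user" : { "value" : "ki*y", "boost" : 1.08 } } } } }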
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ WildcardQuery expectedWrapped = new WildcardQuery(new Term("user", "ki*y"));
+ expectedWrapped.setBoost(1.08f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(expectedWrapped)));
+ }
+
+ @Test
+ public void testSpanMultiTermPrefixQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-prefix.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ PrefixQuery expectedWrapped = new PrefixQuery(new Term("user", "ki"));
+ expectedWrapped.setBoost(1.08f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(expectedWrapped)));
+ }
+
+ @Test
+ public void testSpanMultiTermFuzzyTermQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper.getField(), equalTo("user"));
+ }
+
+ @Test
+ public void testSpanMultiTermFuzzyRangeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json");
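+ // A fuzzy query on the numeric "age" field rewrites to a numeric range; the expected
+ // bounds below (7..17 inclusive) presumably come from a fixture value of 12 with fuzziness 5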
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ NumericRangeQuery<Long> expectedWrapped = NumericRangeQuery.newLongRange("age", 7L, 17L, true, true);
+ expectedWrapped.setBoost(2.0f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(expectedWrapped)));
+ }
+
+ @Test
+ public void testSpanMultiTermNumericRangeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-range-numeric.json");
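+ // Presumed fixture shape: { "span_multi" : { "match" :
+ // { "range" : { "age" : { "gte" : 10, "lt" : 20, "boost" : 2.0 } } } } }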
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ NumericRangeQuery<Long> expectedWrapped = NumericRangeQuery.newLongRange("age", 10L, 20L, true, false);
+ expectedWrapped.setBoost(2.0f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(expectedWrapped)));
+ }
+
+ @Test
+ public void testSpanMultiTermTermRangeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-range-numeric.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ NumericRangeQuery<Long> expectedWrapped = NumericRangeQuery.newLongRange("age", 10L, 20L, true, false);
+ expectedWrapped.setBoost(2.0f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(expectedWrapped)));
+ }
+
+ @Test
+ public void testQueryFilterBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), queryFilter(termQuery("name.last", "banon")))).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ QueryWrapperFilter queryWrapperFilter = (QueryWrapperFilter) filteredQuery.getFilter();
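+ // QueryWrapperFilter keeps the wrapped query in a private field, so read it reflectively to assert on it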
+ Field field = QueryWrapperFilter.class.getDeclaredField("query");
+ field.setAccessible(true);
+ Query wrappedQuery = (Query) field.get(queryWrapperFilter);
+ assertThat(wrappedQuery, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) wrappedQuery).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testQueryFilter() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
+ QueryWrapperFilter queryWrapperFilter = (QueryWrapperFilter) filteredQuery.getFilter();
+ Field field = QueryWrapperFilter.class.getDeclaredField("query");
+ field.setAccessible(true);
+ Query wrappedQuery = (Query) field.get(queryWrapperFilter);
+ assertThat(wrappedQuery, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) wrappedQuery).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testFQueryFilter() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fquery-filter.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
+ XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery.query();
+ QueryWrapperFilter queryWrapperFilter = (QueryWrapperFilter) filteredQuery.getFilter();
+ Field field = QueryWrapperFilter.class.getDeclaredField("query");
+ field.setAccessible(true);
+ Query wrappedQuery = (Query) field.get(queryWrapperFilter);
+ assertThat(wrappedQuery, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) wrappedQuery).getTerm(), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testMoreLikeThisBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(moreLikeThisQuery("name.first", "name.last").likeText("something").minTermFreq(1).maxQueryTerms(12)).query();
+ assertThat(parsedQuery, instanceOf(MoreLikeThisQuery.class));
+ MoreLikeThisQuery mltQuery = (MoreLikeThisQuery) parsedQuery;
+ assertThat(mltQuery.getMoreLikeFields()[0], equalTo("name.first"));
+ assertThat(mltQuery.getLikeText(), equalTo("something"));
+ assertThat(mltQuery.getMinTermFrequency(), equalTo(1));
+ assertThat(mltQuery.getMaxQueryTerms(), equalTo(12));
+ }
+
+ @Test
+ public void testMoreLikeThis() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/mlt.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(MoreLikeThisQuery.class));
+ MoreLikeThisQuery mltQuery = (MoreLikeThisQuery) parsedQuery;
+ assertThat(mltQuery.getMoreLikeFields()[0], equalTo("name.first"));
+ assertThat(mltQuery.getMoreLikeFields()[1], equalTo("name.last"));
+ assertThat(mltQuery.getLikeText(), equalTo("something"));
+ assertThat(mltQuery.getMinTermFrequency(), equalTo(1));
+ assertThat(mltQuery.getMaxQueryTerms(), equalTo(12));
+ }
+
+ @Test
+ public void testFuzzyLikeThisBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(fuzzyLikeThisQuery("name.first", "name.last").likeText("something").maxQueryTerms(12)).query();
+ assertThat(parsedQuery, instanceOf(FuzzyLikeThisQuery.class));
+ parsedQuery = queryParser.parse(fuzzyLikeThisQuery("name.first", "name.last").likeText("something").maxQueryTerms(12).fuzziness(Fuzziness.build("4"))).query();
+ assertThat(parsedQuery, instanceOf(FuzzyLikeThisQuery.class));
+
+ Query parsedQuery1 = queryParser.parse(fuzzyLikeThisQuery("name.first", "name.last").likeText("something").maxQueryTerms(12).fuzziness(Fuzziness.build("4.0"))).query();
+ assertThat(parsedQuery1, instanceOf(FuzzyLikeThisQuery.class));
+ assertThat(parsedQuery, equalTo(parsedQuery1));
+
+ try {
+ queryParser.parse(fuzzyLikeThisQuery("name.first", "name.last").likeText("something").maxQueryTerms(12).fuzziness(Fuzziness.build("4.1"))).query();
+ fail("exception expected - fractional edit distance");
+ } catch (ElasticsearchException ex) {
+ // expected: fractional edit distances are rejected
+ }
+
+ try {
+ queryParser.parse(fuzzyLikeThisQuery("name.first", "name.last").likeText("something").maxQueryTerms(12).fuzziness(Fuzziness.build("-" + between(1, 100)))).query();
+ fail("exception expected - negative edit distance");
+ } catch (ElasticsearchException ex) {
+ // expected: negative edit distances are rejected
+ }
+ String[] queries = new String[] {
+ "{\"flt\": {\"fields\": [\"comment\"], \"like_text\": \"FFFdfds\",\"fuzziness\": \"4\"}}",
+ "{\"flt\": {\"fields\": [\"comment\"], \"like_text\": \"FFFdfds\",\"fuzziness\": \"4.00000000\"}}",
+ "{\"flt\": {\"fields\": [\"comment\"], \"like_text\": \"FFFdfds\",\"fuzziness\": \"4.\"}}",
+ "{\"flt\": {\"fields\": [\"comment\"], \"like_text\": \"FFFdfds\",\"fuzziness\": 4}}",
+ "{\"flt\": {\"fields\": [\"comment\"], \"like_text\": \"FFFdfds\",\"fuzziness\": 4.0}}"
+ };
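+ // all five spellings encode the same fuzziness of 4, so any two randomly drawn queries must parse to equal results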
+ int iters = atLeast(5);
+ for (int i = 0; i < iters; i++) {
+ parsedQuery = queryParser.parse(new BytesArray((String) randomFrom(queries))).query();
+ parsedQuery1 = queryParser.parse(new BytesArray((String) randomFrom(queries))).query();
+ assertThat(parsedQuery1, instanceOf(FuzzyLikeThisQuery.class));
+ assertThat(parsedQuery, instanceOf(FuzzyLikeThisQuery.class));
+ assertThat(parsedQuery, equalTo(parsedQuery1));
+ }
+ }
+
+ @Test
+ public void testFuzzyLikeThis() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fuzzyLikeThis.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FuzzyLikeThisQuery.class));
+// FuzzyLikeThisQuery fuzzyLikeThisQuery = (FuzzyLikeThisQuery) parsedQuery;
+ }
+
+ @Test
+ public void testFuzzyLikeThisFieldBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(fuzzyLikeThisFieldQuery("name.first").likeText("something").maxQueryTerms(12)).query();
+ assertThat(parsedQuery, instanceOf(FuzzyLikeThisQuery.class));
+// FuzzyLikeThisQuery fuzzyLikeThisQuery = (FuzzyLikeThisQuery) parsedQuery;
+ }
+
+ @Test
+ public void testFuzzyLikeThisField() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fuzzyLikeThisField.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FuzzyLikeThisQuery.class));
+// FuzzyLikeThisQuery fuzzyLikeThisQuery = (FuzzyLikeThisQuery) parsedQuery;
+ }
+
+ @Test
+ public void testMoreLikeThisFieldBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(moreLikeThisFieldQuery("name.first").likeText("something").minTermFreq(1).maxQueryTerms(12)).query();
+ assertThat(parsedQuery, instanceOf(MoreLikeThisQuery.class));
+ MoreLikeThisQuery mltQuery = (MoreLikeThisQuery) parsedQuery;
+ assertThat(mltQuery.getMoreLikeFields()[0], equalTo("name.first"));
+ assertThat(mltQuery.getLikeText(), equalTo("something"));
+ assertThat(mltQuery.getMinTermFrequency(), equalTo(1));
+ assertThat(mltQuery.getMaxQueryTerms(), equalTo(12));
+ }
+
+ @Test
+ public void testMoreLikeThisField() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/mltField.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(MoreLikeThisQuery.class));
+ MoreLikeThisQuery mltQuery = (MoreLikeThisQuery) parsedQuery;
+ assertThat(mltQuery.getMoreLikeFields()[0], equalTo("name.first"));
+ assertThat(mltQuery.getLikeText(), equalTo("something"));
+ assertThat(mltQuery.getMinTermFrequency(), equalTo(1));
+ assertThat(mltQuery.getMaxQueryTerms(), equalTo(12));
+ }
+
+ @Test
+ public void testGeoDistanceFilterNamed() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance-named.json");
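+ // Presumed fixture shape, inferred from the assertions below: { "constant_score" : { "filter" :
+ // { "geo_distance" : { "distance" : "12mi", "location" : { "lat" : 40, "lon" : -70 }, "_name" : "test" } } } }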
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery.query();
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter1() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter4() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance4.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter5() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance5.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter6() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance6.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter7() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance7.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(0.012, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter8() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance8.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.KILOMETERS.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter9() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance9.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter10() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance10.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter11() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance11.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceFilter12() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance12.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoDistanceFilter filter = (GeoDistanceFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.distance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilterNamed() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox-named.json");
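+ // Presumed fixture shape: a constant_score query over { "geo_bounding_box" : { "location" : {
+ // "top_left" : { "lat" : 40, "lon" : -70 }, "bottom_right" : { "lat" : 30, "lon" : -80 } }, "_name" : "test" } }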
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.query(), instanceOf(XConstantScoreQuery.class));
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery.query();
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+
+ @Test
+ public void testGeoBoundingBoxFilter1() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter4() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox4.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter5() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox5.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter6() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox6.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxFilter filter = (InMemoryGeoBoundingBoxFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+
+ @Test
+ public void testGeoPolygonNamedFilter() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon-named.json");
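+ // Presumed fixture: a constant_score query over a geo_polygon filter on "location" with
+ // "_name" : "test" and four points, presumably (40,-70), (30,-80), (20,-90) plus a closing point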
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery.query();
+ GeoPolygonFilter filter = (GeoPolygonFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ @Test
+ public void testGeoPolygonFilter1() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoPolygonFilter filter = (GeoPolygonFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ @Test
+ public void testGeoPolygonFilter2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoPolygonFilter filter = (GeoPolygonFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ @Test
+ public void testGeoPolygonFilter3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoPolygonFilter filter = (GeoPolygonFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ @Test
+ public void testGeoPolygonFilter4() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon4.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ GeoPolygonFilter filter = (GeoPolygonFilter) constantScoreQuery.getFilter();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ @Test
+ public void testGeoShapeFilter() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geoShape-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(XConstantScoreQuery.class));
+ XConstantScoreQuery constantScoreQuery = (XConstantScoreQuery) parsedQuery;
+ assertThat(constantScoreQuery.getFilter(), instanceOf(IntersectsPrefixTreeFilter.class));
+ }
+
+ @Test
+ public void testGeoShapeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geoShape-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery csq = (ConstantScoreQuery) parsedQuery;
+ assertThat(csq.getFilter(), instanceOf(IntersectsPrefixTreeFilter.class));
+ }
+
+ @Test
+ public void testCommonTermsQuery1() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class));
+ ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery;
+ assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), nullValue());
+ assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("2"));
+ }
+
+ @Test
+ public void testCommonTermsQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query2.json");
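+ // Presumed fixture shape (field name unknown): { "common" : { "<field>" : { "query" : "...",
+ // "minimum_should_match" : { "high_freq" : "50%", "low_freq" : "5<20%" } } } }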
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class));
+ ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery;
+ assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), equalTo("50%"));
+ assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("5<20%"));
+ }
+
+ @Test
+ public void testCommonTermsQuery3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class));
+ ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery;
+ assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), nullValue());
+ assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("2"));
+ }
+
+ @Test(expected = QueryParsingException.class)
+ public void assureMalformedThrowsException() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/faulty-function-score-query.json");
+ queryParser.parse(query).query();
+ }
+
+ @Test
+ public void testFilterParsing() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/function-filter-score-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat((double) parsedQuery.getBoost(), Matchers.closeTo(3.0, 1.e-7));
+ }
+
+ @Test
+ public void testBadTypeMatchQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/match-query-bad-type.json");
+ QueryParsingException expectedException = null;
+ try {
+ queryParser.parse(query).query();
+ } catch (QueryParsingException qpe) {
+ expectedException = qpe;
+ }
+ assertThat(expectedException, notNullValue());
+ }
+
+ @Test
+ public void testMultiMatchQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/multiMatch-query-simple.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ }
+
+ @Test
+ public void testBadTypeMultiMatchQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/multiMatch-query-bad-type.json");
+ QueryParsingException expectedException = null;
+ try {
+ queryParser.parse(query).query();
+ } catch (QueryParsingException qpe) {
+ expectedException = qpe;
+ }
+ assertThat(expectedException, notNullValue());
+ }
+
+ @Test
+ public void testMultiMatchQueryWithFieldsAsString() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ }
+
+ @Test
+ public void testSimpleQueryString() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/simple-query-string.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/and-filter-cache.json b/src/test/java/org/elasticsearch/index/query/and-filter-cache.json
new file mode 100644
index 0000000..41cc482
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/and-filter-cache.json
@@ -0,0 +1,21 @@
+{
+ "filtered":{
+ "filter":{
+ "and":{
+ "filters":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ],
+ "_cache" : true
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/and-filter-named.json b/src/test/java/org/elasticsearch/index/query/and-filter-named.json
new file mode 100644
index 0000000..605a193
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/and-filter-named.json
@@ -0,0 +1,26 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "and":{
+ "filters":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ],
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/and-filter.json b/src/test/java/org/elasticsearch/index/query/and-filter.json
new file mode 100644
index 0000000..752add1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/and-filter.json
@@ -0,0 +1,25 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "and":{
+ "filters":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/and-filter2.json b/src/test/java/org/elasticsearch/index/query/and-filter2.json
new file mode 100644
index 0000000..580b8e9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/and-filter2.json
@@ -0,0 +1,23 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "and":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ]
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/bool-filter.json b/src/test/java/org/elasticsearch/index/query/bool-filter.json
new file mode 100644
index 0000000..484e517
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/bool-filter.json
@@ -0,0 +1,35 @@
+{
+ filtered:{
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ },
+ filter:{
+ bool:{
+ must:[
+ {
+ term:{
+ "name.first":"shay1"
+ }
+ },
+ {
+ term:{
+ "name.first":"shay4"
+ }
+ }
+ ],
+ must_not:{
+ term:{
+ "name.first":"shay2"
+ }
+ },
+ should:{
+ term:{
+ "name.first":"shay3"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/bool.json b/src/test/java/org/elasticsearch/index/query/bool.json
new file mode 100644
index 0000000..1619fcf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/bool.json
@@ -0,0 +1,30 @@
+{
+ bool:{
+ must:[
+ {
+ query_string:{
+ default_field:"content",
+ query:"test1"
+ }
+ },
+ {
+ query_string:{
+ default_field:"content",
+ query:"test4"
+ }
+ }
+ ],
+ must_not:{
+ query_string:{
+ default_field:"content",
+ query:"test2"
+ }
+ },
+ should:{
+ query_string:{
+ default_field:"content",
+ query:"test3"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/boosting-query.json b/src/test/java/org/elasticsearch/index/query/boosting-query.json
new file mode 100644
index 0000000..87b6e6d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/boosting-query.json
@@ -0,0 +1,15 @@
+{
+ "boosting":{
+ "positive":{
+ "term":{
+ "field1":"value1"
+ }
+ },
+ "negative":{
+ "term":{
+ "field2":"value2"
+ }
+ },
+ "negative_boost":0.2
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/child-mapping.json b/src/test/java/org/elasticsearch/index/query/child-mapping.json
new file mode 100644
index 0000000..6f3b6e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/child-mapping.json
@@ -0,0 +1,12 @@
+{
+ "child":{
+ "properties":{
+ "field":{
+ "type":"string"
+ }
+ },
+ "_parent" : {
+ "type" : "person"
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/commonTerms-query1.json b/src/test/java/org/elasticsearch/index/query/commonTerms-query1.json
new file mode 100644
index 0000000..b2728da
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/commonTerms-query1.json
@@ -0,0 +1,11 @@
+{
+ "common" : {
+ "dogs" : {
+ "query" : "buck mia tom",
+ "cutoff_frequency" : 1,
+ "minimum_should_match" : {
+ "low_freq" : 2
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/commonTerms-query2.json b/src/test/java/org/elasticsearch/index/query/commonTerms-query2.json
new file mode 100644
index 0000000..aeb281b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/commonTerms-query2.json
@@ -0,0 +1,11 @@
+{
+ "common" : {
+ "dogs" : {
+ "query" : "buck mia tom",
+ "minimum_should_match" : {
+ "high_freq" : "50%",
+ "low_freq" : "5<20%"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/commonTerms-query3.json b/src/test/java/org/elasticsearch/index/query/commonTerms-query3.json
new file mode 100644
index 0000000..f276209
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/commonTerms-query3.json
@@ -0,0 +1,9 @@
+{
+ "common" : {
+ "dogs" : {
+ "query" : "buck mia tom",
+ "cutoff_frequency" : 1,
+ "minimum_should_match" : 2
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/constantScore-query.json b/src/test/java/org/elasticsearch/index/query/constantScore-query.json
new file mode 100644
index 0000000..bf59bc5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/constantScore-query.json
@@ -0,0 +1,9 @@
+{
+ constant_score:{
+ filter:{
+ term:{
+ "name.last":"banon"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/custom-boost-factor-query.json b/src/test/java/org/elasticsearch/index/query/custom-boost-factor-query.json
new file mode 100644
index 0000000..6f82921
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/custom-boost-factor-query.json
@@ -0,0 +1,10 @@
+{
+ "custom_boost_factor":{
+ "query":{
+ "term":{
+ "name.last":"banon"
+ }
+ },
+ "boost_factor":1.3
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/custom_score1.json b/src/test/java/org/elasticsearch/index/query/custom_score1.json
new file mode 100644
index 0000000..6d7dcac
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/custom_score1.json
@@ -0,0 +1,10 @@
+{
+ "custom_score":{
+ "query":{
+ "term":{
+ "name.last":"banon"
+ }
+ },
+ "script":"score * doc['name.first']"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/data.json b/src/test/java/org/elasticsearch/index/query/data.json
new file mode 100644
index 0000000..b3c6db8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/data.json
@@ -0,0 +1,45 @@
+{
+ _boost:3.7,
+ _id:"1",
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null,
+ "location":{
+ "lat":1.1,
+ "lon":1.2
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/date_range_in_boolean.json b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean.json
new file mode 100644
index 0000000..08fe069
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean.json
@@ -0,0 +1,25 @@
+{
+ "constant_score": {
+ "filter": {
+ "bool": {
+ "must": [
+ {
+ "term": {
+ "foo": {
+ "value": "bar"
+ }
+ }
+ },
+ {
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "now"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached.json b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached.json
new file mode 100644
index 0000000..a6c0bdc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached.json
@@ -0,0 +1,26 @@
+{
+ "constant_score": {
+ "filter": {
+ "bool": {
+ "_cache" : true,
+ "must": [
+ {
+ "term": {
+ "foo": {
+ "value": "bar"
+ }
+ }
+ },
+ {
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "2013-01-01"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now.json b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now.json
new file mode 100644
index 0000000..cc779db
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now.json
@@ -0,0 +1,26 @@
+{
+ "constant_score": {
+ "filter": {
+ "bool": {
+ "_cache" : true,
+ "must": [
+ {
+ "term": {
+ "foo": {
+ "value": "bar"
+ }
+ }
+ },
+ {
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "now+1m+1s"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now_with_rounding.json b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now_with_rounding.json
new file mode 100644
index 0000000..5e7da0a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now_with_rounding.json
@@ -0,0 +1,26 @@
+{
+ "constant_score": {
+ "filter": {
+ "bool": {
+ "_cache" : true,
+ "must": [
+ {
+ "term": {
+ "foo": {
+ "value": "bar"
+ }
+ }
+ },
+ {
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "now+1m+1s/m"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now.json b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now.json
new file mode 100644
index 0000000..0040bdc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now.json
@@ -0,0 +1,26 @@
+{
+ "constant_score": {
+ "filter": {
+ "bool": {
+ "_cache" : true,
+ "must": [
+ {
+ "term": {
+ "foo": {
+ "value": "bar"
+ }
+ }
+ },
+ {
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "now"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now_with_rounding.json b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now_with_rounding.json
new file mode 100644
index 0000000..9dd1d1a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now_with_rounding.json
@@ -0,0 +1,26 @@
+{
+ "constant_score": {
+ "filter": {
+ "bool": {
+ "_cache" : true,
+ "must": [
+ {
+ "term": {
+ "foo": {
+ "value": "bar"
+ }
+ }
+ },
+ {
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "now/d"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/disMax.json b/src/test/java/org/elasticsearch/index/query/disMax.json
new file mode 100644
index 0000000..99da2df
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/disMax.json
@@ -0,0 +1,18 @@
+{
+ dis_max:{
+ tie_breaker:0.7,
+ boost:1.2,
+ queries:[
+ {
+ term:{
+ "name.first":"first"
+ }
+ },
+ {
+ term:{
+ "name.last":"last"
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/disMax2.json b/src/test/java/org/elasticsearch/index/query/disMax2.json
new file mode 100644
index 0000000..ea92d64
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/disMax2.json
@@ -0,0 +1,14 @@
+{
+ "dis_max":{
+ "queries":[
+ {
+ "prefix":{
+ "name.first":{
+ "value":"sh",
+ "boost":1.2
+ }
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/faulty-function-score-query.json b/src/test/java/org/elasticsearch/index/query/faulty-function-score-query.json
new file mode 100644
index 0000000..07f906c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/faulty-function-score-query.json
@@ -0,0 +1,15 @@
+{
+ "function_score":{
+ "query":{
+ "term":{
+ "name.last":"banon"
+ }
+ },
+ "functions": {
+ {
+ "boost_factor" : 3
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/field3.json b/src/test/java/org/elasticsearch/index/query/field3.json
new file mode 100644
index 0000000..61e349f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/field3.json
@@ -0,0 +1,9 @@
+{
+ field:{
+ age:{
+ query:34,
+ boost:2.0,
+ enable_position_increments:false
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/filtered-query.json b/src/test/java/org/elasticsearch/index/query/filtered-query.json
new file mode 100644
index 0000000..8eea99a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/filtered-query.json
@@ -0,0 +1,14 @@
+{
+ filtered:{
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ },
+ filter:{
+ term:{
+ "name.last":"banon"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/filtered-query2.json b/src/test/java/org/elasticsearch/index/query/filtered-query2.json
new file mode 100644
index 0000000..b23faf4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/filtered-query2.json
@@ -0,0 +1,14 @@
+{
+ filtered:{
+ filter:{
+ term:{
+ "name.last":"banon"
+ }
+ },
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/filtered-query3.json b/src/test/java/org/elasticsearch/index/query/filtered-query3.json
new file mode 100644
index 0000000..4a9db49
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/filtered-query3.json
@@ -0,0 +1,19 @@
+{
+ filtered:{
+ filter:{
+ range:{
+ age:{
+ from:"23",
+ to:"54",
+ include_lower:true,
+ include_upper:false
+ }
+ }
+ },
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/filtered-query4.json b/src/test/java/org/elasticsearch/index/query/filtered-query4.json
new file mode 100644
index 0000000..8c10013
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/filtered-query4.json
@@ -0,0 +1,17 @@
+{
+ filtered:{
+ query:{
+ wildcard:{
+ "name.first":{
+ wildcard:"sh*",
+ boost:1.1
+ }
+ }
+ },
+ filter:{
+ term:{
+ "name.last":"banon"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/fquery-filter.json b/src/test/java/org/elasticsearch/index/query/fquery-filter.json
new file mode 100644
index 0000000..6015334
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/fquery-filter.json
@@ -0,0 +1,19 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "fquery":{
+ "query":{
+ "term":{
+ "name.last":"banon"
+ }
+ },
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/function-filter-score-query.json b/src/test/java/org/elasticsearch/index/query/function-filter-score-query.json
new file mode 100644
index 0000000..e78c549
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/function-filter-score-query.json
@@ -0,0 +1,28 @@
+{
+ "function_score":{
+ "query":{
+ "term":{
+ "name.last":"banon"
+ }
+ },
+ "functions": [
+ {
+ "boost_factor": 3,
+ "filter": {
+ term:{
+ "name.last":"banon"
+ }
+ }
+ },
+ {
+ "boost_factor": 3
+ },
+ {
+ "boost_factor": 3
+ }
+ ],
+ "boost" : 3,
+ "score_mode" : "avg",
+ "max_boost" : 10
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json b/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json
new file mode 100644
index 0000000..3e3d30f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json
@@ -0,0 +1,10 @@
+{
+ "fuzzy":{
+ "name.first":{
+ "value":"sh",
+ "fuzziness":0.1,
+ "prefix_length":1,
+ "boost":2.0
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields2.json b/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields2.json
new file mode 100644
index 0000000..095ecc6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields2.json
@@ -0,0 +1,9 @@
+{
+ "fuzzy":{
+ "age":{
+ "value":12,
+ "fuzziness":5,
+ "boost":2.0
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/fuzzy.json b/src/test/java/org/elasticsearch/index/query/fuzzy.json
new file mode 100644
index 0000000..27d8dee
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/fuzzy.json
@@ -0,0 +1,5 @@
+{
+ "fuzzy":{
+ "name.first":"sh"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/fuzzyLikeThis.json b/src/test/java/org/elasticsearch/index/query/fuzzyLikeThis.json
new file mode 100644
index 0000000..ccd30a9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/fuzzyLikeThis.json
@@ -0,0 +1,7 @@
+{
+ fuzzy_like_this:{
+ fields:["name.first", "name.last"],
+ like_text:"something",
+ max_query_terms:12
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/fuzzyLikeThisField.json b/src/test/java/org/elasticsearch/index/query/fuzzyLikeThisField.json
new file mode 100644
index 0000000..114ebe5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/fuzzyLikeThisField.json
@@ -0,0 +1,8 @@
+{
+ fuzzy_like_this_field:{
+ "name.first":{
+ like_text:"something",
+ max_query_terms:12
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/geoShape-filter.json b/src/test/java/org/elasticsearch/index/query/geoShape-filter.json
new file mode 100644
index 0000000..a4392ae
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geoShape-filter.json
@@ -0,0 +1,21 @@
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_shape" : {
+ "country" : {
+ "shape" : {
+ "type" : "Envelope",
+ "coordinates" : [
+ [-45, 45],
+ [45, -45]
+ ]
+ },
+ "relation" : "intersects"
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/geoShape-query.json b/src/test/java/org/elasticsearch/index/query/geoShape-query.json
new file mode 100644
index 0000000..e0af827
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geoShape-query.json
@@ -0,0 +1,14 @@
+{
+ "geo_shape" : {
+ "country" : {
+ "shape" : {
+ "type" : "Envelope",
+ "coordinates" : [
+ [-45, 45],
+ [45, -45]
+ ]
+ },
+ "relation" : "intersects"
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox-named.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox-named.json
new file mode 100644
index 0000000..6db6d5a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox-named.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":[-70, 40],
+ "bottom_right":[-80, 30]
+ },
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox1.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox1.json
new file mode 100644
index 0000000..8d04915
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox1.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":[-70, 40],
+ "bottom_right":[-80, 30]
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox2.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox2.json
new file mode 100644
index 0000000..6321654
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox2.json
@@ -0,0 +1,21 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":{
+ "lat":40,
+ "lon":-70
+ },
+ "bottom_right":{
+ "lat":30,
+ "lon":-80
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox3.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox3.json
new file mode 100644
index 0000000..0899960
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox3.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":"40, -70",
+ "bottom_right":"30, -80"
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox4.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox4.json
new file mode 100644
index 0000000..170a02d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox4.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":"drn5x1g8cu2y",
+ "bottom_right":"30, -80"
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox5.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox5.json
new file mode 100644
index 0000000..347a463
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox5.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_right":"40, -80",
+ "bottom_left":"30, -70"
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_boundingbox6.json b/src/test/java/org/elasticsearch/index/query/geo_boundingbox6.json
new file mode 100644
index 0000000..96ccbd0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_boundingbox6.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "right": -80,
+ "top": 40,
+ "left": -70,
+ "bottom": 30
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance-named.json b/src/test/java/org/elasticsearch/index/query/geo_distance-named.json
new file mode 100644
index 0000000..a3e0be9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance-named.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":{
+ "lat":40,
+ "lon":-70
+ },
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance1.json b/src/test/java/org/elasticsearch/index/query/geo_distance1.json
new file mode 100644
index 0000000..cf3b0ab
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance1.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance10.json b/src/test/java/org/elasticsearch/index/query/geo_distance10.json
new file mode 100644
index 0000000..067b39e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance10.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":19.312128,
+ "unit":"km",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance11.json b/src/test/java/org/elasticsearch/index/query/geo_distance11.json
new file mode 100644
index 0000000..008d5b5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance11.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"19.312128km",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance12.json b/src/test/java/org/elasticsearch/index/query/geo_distance12.json
new file mode 100644
index 0000000..8769223
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance12.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "unit":"km",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance2.json b/src/test/java/org/elasticsearch/index/query/geo_distance2.json
new file mode 100644
index 0000000..3283867
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance2.json
@@ -0,0 +1,13 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":[-70, 40]
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance3.json b/src/test/java/org/elasticsearch/index/query/geo_distance3.json
new file mode 100644
index 0000000..193f234
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance3.json
@@ -0,0 +1,13 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":"40, -70"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance4.json b/src/test/java/org/elasticsearch/index/query/geo_distance4.json
new file mode 100644
index 0000000..56a7409
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance4.json
@@ -0,0 +1,13 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":"drn5x1g8cu2y"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance5.json b/src/test/java/org/elasticsearch/index/query/geo_distance5.json
new file mode 100644
index 0000000..bea9a3d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance5.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":12,
+ "unit":"mi",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance6.json b/src/test/java/org/elasticsearch/index/query/geo_distance6.json
new file mode 100644
index 0000000..4afa128
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance6.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12",
+ "unit":"mi",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance7.json b/src/test/java/org/elasticsearch/index/query/geo_distance7.json
new file mode 100644
index 0000000..7fcf8bd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance7.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"19.312128",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance8.json b/src/test/java/org/elasticsearch/index/query/geo_distance8.json
new file mode 100644
index 0000000..3bafd16
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance8.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":19.312128,
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_distance9.json b/src/test/java/org/elasticsearch/index/query/geo_distance9.json
new file mode 100644
index 0000000..e6c8f12
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_distance9.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"19.312128",
+ "unit":"km",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_polygon-named.json b/src/test/java/org/elasticsearch/index/query/geo_polygon-named.json
new file mode 100644
index 0000000..91256c1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_polygon-named.json
@@ -0,0 +1,19 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ [-70, 40],
+ [-80, 30],
+ [-90, 20]
+ ]
+ },
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_polygon1.json b/src/test/java/org/elasticsearch/index/query/geo_polygon1.json
new file mode 100644
index 0000000..99ac329
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_polygon1.json
@@ -0,0 +1,18 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ [-70, 40],
+ [-80, 30],
+ [-90, 20]
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_polygon2.json b/src/test/java/org/elasticsearch/index/query/geo_polygon2.json
new file mode 100644
index 0000000..588b22f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_polygon2.json
@@ -0,0 +1,27 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ {
+ "lat":40,
+ "lon":-70
+ },
+ {
+ "lat":30,
+ "lon":-80
+ },
+ {
+ "lat":20,
+ "lon":-90
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_polygon3.json b/src/test/java/org/elasticsearch/index/query/geo_polygon3.json
new file mode 100644
index 0000000..d6d905b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_polygon3.json
@@ -0,0 +1,18 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ "40, -70",
+ "30, -80",
+ "20, -90"
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/geo_polygon4.json b/src/test/java/org/elasticsearch/index/query/geo_polygon4.json
new file mode 100644
index 0000000..ae9608d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/geo_polygon4.json
@@ -0,0 +1,18 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ "drn5x1g8cu2y",
+ "30, -80",
+ "20, -90"
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/guice/IndexQueryParserModuleTests.java b/src/test/java/org/elasticsearch/index/query/guice/IndexQueryParserModuleTests.java
new file mode 100644
index 0000000..9b206a0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/guice/IndexQueryParserModuleTests.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.guice;
+
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.query.IndexQueryParserModule;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Verifies that custom query and filter parsers can be registered through index settings.
+ */
+public class IndexQueryParserModuleTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCustomInjection() {
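+ // Register the custom parsers through index settings: the ".type" keys name the parser classes and the remaining keys become per-parser settings.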
+ Settings settings = settingsBuilder()
+ .put("index.queryparser.query.my.type", MyJsonQueryParser.class)
+ .put("index.queryparser.query.my.param1", "value1")
+ .put("index.queryparser.filter.my.type", MyJsonFilterParser.class)
+ .put("index.queryparser.filter.my.param2", "value2")
+ .put("index.cache.filter.type", "none")
+ .build();
+
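+ // Assemble the minimal module graph needed to stand up an IndexQueryParserService outside a running node.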
+ Index index = new Index("test");
+ Injector injector = new ModulesBuilder().add(
+ new SettingsModule(settings),
+ new CacheRecyclerModule(settings),
+ new CodecModule(settings),
+ new ThreadPoolModule(settings),
+ new IndicesQueriesModule(),
+ new ScriptModule(settings),
+ new IndexSettingsModule(index, settings),
+ new IndexCacheModule(settings),
+ new AnalysisModule(settings),
+ new IndexEngineModule(settings),
+ new SimilarityModule(settings),
+ new IndexQueryParserModule(settings),
+ new IndexNameModule(index),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+
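+ // Resolve the service and verify that both parsers were bound under the name "my" with their settings intact.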
+ IndexQueryParserService indexQueryParserService = injector.getInstance(IndexQueryParserService.class);
+
+ MyJsonQueryParser myJsonQueryParser = (MyJsonQueryParser) indexQueryParserService.queryParser("my");
+
+ assertThat(myJsonQueryParser.names()[0], equalTo("my"));
+ assertThat(myJsonQueryParser.settings().get("param1"), equalTo("value1"));
+
+ MyJsonFilterParser myJsonFilterParser = (MyJsonFilterParser) indexQueryParserService.filterParser("my");
+ assertThat(myJsonFilterParser.names()[0], equalTo("my"));
+ assertThat(myJsonFilterParser.settings().get("param2"), equalTo("value2"));
+
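+ // Shut down the thread pool created by the module graph so the test leaves no stray threads.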
+ injector.getInstance(ThreadPool.class).shutdownNow();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/guice/MyJsonFilterParser.java b/src/test/java/org/elasticsearch/index/query/guice/MyJsonFilterParser.java
new file mode 100644
index 0000000..09b53d3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/guice/MyJsonFilterParser.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.guice;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.FilterParser;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.IOException;
+
+/**
+ * Minimal FilterParser stub used to verify custom parser injection; parse() deliberately returns null.
+ */
+public class MyJsonFilterParser extends AbstractIndexComponent implements FilterParser {
+
+ private final String name;
+
+ private final Settings settings;
+
+ @Inject
+ public MyJsonFilterParser(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings);
+ this.name = name;
+ this.settings = settings;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{this.name};
+ }
+
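+ // Intentionally a stub: the tests exercise registration and settings plumbing, not parsing.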
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ return null;
+ }
+
+ public Settings settings() {
+ return settings;
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/guice/MyJsonQueryParser.java b/src/test/java/org/elasticsearch/index/query/guice/MyJsonQueryParser.java
new file mode 100644
index 0000000..582ef13
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/guice/MyJsonQueryParser.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.guice;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParser;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.IOException;
+
+/**
+ * Minimal QueryParser stub used to verify custom parser injection; parse() deliberately returns null.
+ */
+public class MyJsonQueryParser extends AbstractIndexComponent implements QueryParser {
+
+ private final String name;
+
+ private final Settings settings;
+
+ @Inject
+ public MyJsonQueryParser(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings);
+ this.name = name;
+ this.settings = settings;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{this.name};
+ }
+
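+ // Intentionally a stub: the tests exercise registration and settings plumbing, not parsing.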
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ return null;
+ }
+
+ public Settings settings() {
+ return settings;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/has-child-in-and-filter-cached.json b/src/test/java/org/elasticsearch/index/query/has-child-in-and-filter-cached.json
new file mode 100644
index 0000000..4b055cb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/has-child-in-and-filter-cached.json
@@ -0,0 +1,19 @@
+{
+ "filtered":{
+ "filter":{
+ "and" : {
+ "filters" : [
+ {
+ "has_child" : {
+ "type" : "child",
+ "query" : {
+ "match_all" : {}
+ }
+ }
+ }
+ ],
+ "_cache" : true
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/has-child.json b/src/test/java/org/elasticsearch/index/query/has-child.json
new file mode 100644
index 0000000..c87ac17
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/has-child.json
@@ -0,0 +1,13 @@
+{
+ "filtered":{
+ "filter":{
+ "has_child" : {
+ "type" : "child",
+ "query" : {
+ "match_all" : {}
+ },
+ "_cache" : true
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/limit-filter.json b/src/test/java/org/elasticsearch/index/query/limit-filter.json
new file mode 100644
index 0000000..549f331
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/limit-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered":{
+ "filter":{
+ "limit":{
+ "value":2
+ }
+ },
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/mapping.json b/src/test/java/org/elasticsearch/index/query/mapping.json
new file mode 100644
index 0000000..3939249
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/mapping.json
@@ -0,0 +1,15 @@
+{
+ "person":{
+ "properties":{
+ "location":{
+ "type":"geo_point"
+ },
+ "country" : {
+ "type" : "geo_shape"
+ },
+ "born":{
+ "type":"date"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/match-query-bad-type.json b/src/test/java/org/elasticsearch/index/query/match-query-bad-type.json
new file mode 100644
index 0000000..47d1227
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/match-query-bad-type.json
@@ -0,0 +1,8 @@
+{
+ "match" : {
+ "message" : {
+ "query" : "this is a test",
+ "type" : "doesNotExist"
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/matchAll.json b/src/test/java/org/elasticsearch/index/query/matchAll.json
new file mode 100644
index 0000000..3325646
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/matchAll.json
@@ -0,0 +1,5 @@
+{
+ match_all:{
+ boost:1.2
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/match_all_empty1.json b/src/test/java/org/elasticsearch/index/query/match_all_empty1.json
new file mode 100644
index 0000000..6dd141f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/match_all_empty1.json
@@ -0,0 +1,3 @@
+{
+ "match_all": {}
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/match_all_empty2.json b/src/test/java/org/elasticsearch/index/query/match_all_empty2.json
new file mode 100644
index 0000000..a0549df
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/match_all_empty2.json
@@ -0,0 +1,3 @@
+{
+ "match_all": []
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/mlt.json b/src/test/java/org/elasticsearch/index/query/mlt.json
new file mode 100644
index 0000000..3f45bb4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/mlt.json
@@ -0,0 +1,8 @@
+{
+ more_like_this:{
+ fields:["name.first", "name.last"],
+ like_text:"something",
+ min_term_freq:1,
+ max_query_terms:12
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/mltField.json b/src/test/java/org/elasticsearch/index/query/mltField.json
new file mode 100644
index 0000000..9f9eb59
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/mltField.json
@@ -0,0 +1,9 @@
+{
+ more_like_this_field:{
+ "name.first":{
+ like_text:"something",
+ min_term_freq:1,
+ max_query_terms:12
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/multiMatch-query-bad-type.json b/src/test/java/org/elasticsearch/index/query/multiMatch-query-bad-type.json
new file mode 100644
index 0000000..9c3b751
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/multiMatch-query-bad-type.json
@@ -0,0 +1,7 @@
+{
+ "multi_match": {
+ "query": "foo bar",
+ "fields": [ "myField", "otherField" ],
+ "type":"doesNotExist"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json b/src/test/java/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json
new file mode 100644
index 0000000..d29211d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json
@@ -0,0 +1,6 @@
+{
+ "multi_match": {
+ "query": "foo bar",
+ "fields": "myField"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/multiMatch-query-simple.json b/src/test/java/org/elasticsearch/index/query/multiMatch-query-simple.json
new file mode 100644
index 0000000..904ba0e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/multiMatch-query-simple.json
@@ -0,0 +1,6 @@
+{
+ "multi_match": {
+ "query": "foo bar",
+ "fields": [ "myField", "otherField" ]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/not-filter.json b/src/test/java/org/elasticsearch/index/query/not-filter.json
new file mode 100644
index 0000000..42c48d8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/not-filter.json
@@ -0,0 +1,18 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "not":{
+ "filter":{
+ "term":{
+ "name.first":"shay1"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/not-filter2.json b/src/test/java/org/elasticsearch/index/query/not-filter2.json
new file mode 100644
index 0000000..6defaff
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/not-filter2.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "not":{
+ "term":{
+ "name.first":"shay1"
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/not-filter3.json b/src/test/java/org/elasticsearch/index/query/not-filter3.json
new file mode 100644
index 0000000..ab61335
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/not-filter3.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "filter":{
+ "not":{
+ "term":{
+ "name.first":"shay1"
+ }
+ }
+ },
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/numeric_range-filter.json b/src/test/java/org/elasticsearch/index/query/numeric_range-filter.json
new file mode 100644
index 0000000..fbae8ae
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/numeric_range-filter.json
@@ -0,0 +1,19 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "numeric_range":{
+ "age":{
+ "from":"23",
+ "to":"54",
+ "include_lower":true,
+ "include_upper":false
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/or-filter.json b/src/test/java/org/elasticsearch/index/query/or-filter.json
new file mode 100644
index 0000000..b1e73fa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/or-filter.json
@@ -0,0 +1,25 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "or":{
+ "filters":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/or-filter2.json b/src/test/java/org/elasticsearch/index/query/or-filter2.json
new file mode 100644
index 0000000..2c15e9a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/or-filter2.json
@@ -0,0 +1,23 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "or":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ]
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java b/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java
new file mode 100644
index 0000000..e04c346
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.plugin;
+
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.query.IndexQueryParserModule;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Verifies programmatic registration of plugin parsers via addQueryParser/addFilterParser.
+ */
+public class IndexQueryParserPlugin2Tests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCustomInjection() {
+ Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
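+ // Register the plugin parsers directly on the module, the programmatic alternative to settings-based registration.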
+ IndexQueryParserModule queryParserModule = new IndexQueryParserModule(settings);
+ queryParserModule.addQueryParser("my", PluginJsonQueryParser.class);
+ queryParserModule.addFilterParser("my", PluginJsonFilterParser.class);
+
+ Index index = new Index("test");
+ Injector injector = new ModulesBuilder().add(
+ new CodecModule(settings),
+ new CacheRecyclerModule(settings),
+ new SettingsModule(settings),
+ new ThreadPoolModule(settings),
+ new IndicesQueriesModule(),
+ new ScriptModule(settings),
+ new IndexSettingsModule(index, settings),
+ new IndexCacheModule(settings),
+ new AnalysisModule(settings),
+ new IndexEngineModule(settings),
+ new SimilarityModule(settings),
+ queryParserModule,
+ new IndexNameModule(index),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+
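+ // Resolve the service and confirm the plugin parsers are retrievable by name.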
+ IndexQueryParserService indexQueryParserService = injector.getInstance(IndexQueryParserService.class);
+
+ PluginJsonQueryParser myJsonQueryParser = (PluginJsonQueryParser) indexQueryParserService.queryParser("my");
+
+ assertThat(myJsonQueryParser.names()[0], equalTo("my"));
+
+ PluginJsonFilterParser myJsonFilterParser = (PluginJsonFilterParser) indexQueryParserService.filterParser("my");
+ assertThat(myJsonFilterParser.names()[0], equalTo("my"));
+
+ injector.getInstance(ThreadPool.class).shutdownNow();
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java b/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java
new file mode 100644
index 0000000..53b1cb5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.plugin;
+
+import org.elasticsearch.cache.recycler.CacheRecyclerModule;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.query.IndexQueryParserModule;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests that query and filter parsers registered through an
+ * {@link IndexQueryParserModule.QueryParsersProcessor} are resolvable from
+ * {@link IndexQueryParserService}.
+ */
+public class IndexQueryParserPluginTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCustomInjection() {
+ Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
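+ // Register the custom parsers through a QueryParsersProcessor callback rather
+ // than the addQueryParser/addFilterParser shortcuts used in IndexQueryParserPlugin2Tests.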
+ IndexQueryParserModule queryParserModule = new IndexQueryParserModule(settings);
+ queryParserModule.addProcessor(new IndexQueryParserModule.QueryParsersProcessor() {
+ @Override
+ public void processXContentQueryParsers(XContentQueryParsersBindings bindings) {
+ bindings.processXContentQueryParser("my", PluginJsonQueryParser.class);
+ }
+
+ @Override
+ public void processXContentFilterParsers(XContentFilterParsersBindings bindings) {
+ bindings.processXContentQueryFilter("my", PluginJsonFilterParser.class);
+ }
+ });
+
+ Index index = new Index("test");
+ Injector injector = new ModulesBuilder().add(
+ new SettingsModule(settings),
+ new CacheRecyclerModule(settings),
+ new ThreadPoolModule(settings),
+ new IndicesQueriesModule(),
+ new ScriptModule(settings),
+ new IndexSettingsModule(index, settings),
+ new IndexCacheModule(settings),
+ new AnalysisModule(settings),
+ new IndexEngineModule(settings),
+ new SimilarityModule(settings),
+ queryParserModule,
+ new IndexNameModule(index),
+ new CodecModule(settings),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+
+ IndexQueryParserService indexQueryParserService = injector.getInstance(IndexQueryParserService.class);
+
+ PluginJsonQueryParser myJsonQueryParser = (PluginJsonQueryParser) indexQueryParserService.queryParser("my");
+
+ assertThat(myJsonQueryParser.names()[0], equalTo("my"));
+
+ PluginJsonFilterParser myJsonFilterParser = (PluginJsonFilterParser) indexQueryParserService.filterParser("my");
+ assertThat(myJsonFilterParser.names()[0], equalTo("my"));
+
+ injector.getInstance(ThreadPool.class).shutdownNow();
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonFilterParser.java b/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonFilterParser.java
new file mode 100644
index 0000000..194060e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonFilterParser.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.plugin;
+
+import org.apache.lucene.search.Filter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.FilterParser;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.IOException;
+
+/**
+ * Dummy filter parser for the plugin tests: it exposes its injected name and
+ * settings but does not parse anything.
+ */
+public class PluginJsonFilterParser extends AbstractIndexComponent implements FilterParser {
+
+ private final String name;
+
+ private final Settings settings;
+
+ @Inject
+ public PluginJsonFilterParser(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings);
+ this.name = name;
+ this.settings = settings;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{this.name};
+ }
+
+ @Override
+ public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
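+ // No-op: these tests only verify that the parser is registered under its name.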
+ return null;
+ }
+
+ public Settings settings() {
+ return settings;
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonQueryParser.java b/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonQueryParser.java
new file mode 100644
index 0000000..d475cdf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonQueryParser.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.plugin;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParser;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.IOException;
+
+/**
+ * Dummy query parser for the plugin tests: it exposes its injected name and
+ * settings but does not parse anything.
+ */
+public class PluginJsonQueryParser extends AbstractIndexComponent implements QueryParser {
+
+ private final String name;
+
+ private final Settings settings;
+
+ @Inject
+ public PluginJsonQueryParser(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings);
+ this.name = name;
+ this.settings = settings;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{this.name};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
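+ // No-op: these tests only verify that the parser is registered under its name.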
+ return null;
+ }
+
+ public Settings settings() {
+ return settings;
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/prefix-boost.json b/src/test/java/org/elasticsearch/index/query/prefix-boost.json
new file mode 100644
index 0000000..4da623a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/prefix-boost.json
@@ -0,0 +1,8 @@
+{
+ "prefix":{
+ "name.first":{
+ "value":"sh",
+ "boost":1.2
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/prefix-filter-named.json b/src/test/java/org/elasticsearch/index/query/prefix-filter-named.json
new file mode 100644
index 0000000..de01701
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/prefix-filter-named.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "prefix":{
+ "name.first":"sh",
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/prefix-filter.json b/src/test/java/org/elasticsearch/index/query/prefix-filter.json
new file mode 100644
index 0000000..1f2e42e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/prefix-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "prefix":{
+ "name.first":"sh"
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/prefix-with-boost.json b/src/test/java/org/elasticsearch/index/query/prefix-with-boost.json
new file mode 100644
index 0000000..83e56cb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/prefix-with-boost.json
@@ -0,0 +1,8 @@
+{
+ prefix:{
+ "name.first":{
+ prefix:"sh",
+ boost:2.0
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/prefix.json b/src/test/java/org/elasticsearch/index/query/prefix.json
new file mode 100644
index 0000000..49f5261
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/prefix.json
@@ -0,0 +1,5 @@
+{
+ prefix:{
+ "name.first":"sh"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/query-fields-match.json b/src/test/java/org/elasticsearch/index/query/query-fields-match.json
new file mode 100644
index 0000000..c15cdf3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query-fields-match.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ fields:["name.*"],
+ use_dis_max:false,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/query-fields1.json b/src/test/java/org/elasticsearch/index/query/query-fields1.json
new file mode 100644
index 0000000..84abcaa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query-fields1.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ fields:["content", "name"],
+ use_dis_max:false,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/query-fields2.json b/src/test/java/org/elasticsearch/index/query/query-fields2.json
new file mode 100644
index 0000000..ab39c87
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query-fields2.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ fields:["content", "name"],
+ use_dis_max:true,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/query-fields3.json b/src/test/java/org/elasticsearch/index/query/query-fields3.json
new file mode 100644
index 0000000..8114c1b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query-fields3.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ fields:["content^2.2", "name"],
+ use_dis_max:true,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/query-filter.json b/src/test/java/org/elasticsearch/index/query/query-filter.json
new file mode 100644
index 0000000..dee136d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query-filter.json
@@ -0,0 +1,16 @@
+{
+ filtered:{
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ },
+ filter:{
+ query:{
+ term:{
+ "name.last":"banon"
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/query.json b/src/test/java/org/elasticsearch/index/query/query.json
new file mode 100644
index 0000000..f07a0d8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ default_field:"content",
+ phrase_slop:1,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/query2.json b/src/test/java/org/elasticsearch/index/query/query2.json
new file mode 100644
index 0000000..410e05c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/query2.json
@@ -0,0 +1,6 @@
+{
+ query_string:{
+ default_field:"age",
+ query:"12~0.2"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/range-filter-named.json b/src/test/java/org/elasticsearch/index/query/range-filter-named.json
new file mode 100644
index 0000000..1b50177
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/range-filter-named.json
@@ -0,0 +1,20 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "range":{
+ "age":{
+ "from":"23",
+ "to":"54",
+ "include_lower":true,
+ "include_upper":false
+ },
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/range-filter.json b/src/test/java/org/elasticsearch/index/query/range-filter.json
new file mode 100644
index 0000000..3842e0b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/range-filter.json
@@ -0,0 +1,19 @@
+{
+ filtered:{
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ },
+ filter:{
+ range:{
+ age:{
+ from:"23",
+ to:"54",
+ include_lower:true,
+ include_upper:false
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/range.json b/src/test/java/org/elasticsearch/index/query/range.json
new file mode 100644
index 0000000..cc2363f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/range.json
@@ -0,0 +1,10 @@
+{
+ range:{
+ age:{
+ from:"23",
+ to:"54",
+ include_lower:true,
+ include_upper:false
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/range2.json b/src/test/java/org/elasticsearch/index/query/range2.json
new file mode 100644
index 0000000..c116b3c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/range2.json
@@ -0,0 +1,8 @@
+{
+ range:{
+ age:{
+ gte:"23",
+ lt:"54"
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/regexp-boost.json b/src/test/java/org/elasticsearch/index/query/regexp-boost.json
new file mode 100644
index 0000000..ed8699b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/regexp-boost.json
@@ -0,0 +1,8 @@
+{
+ "regexp":{
+ "name.first":{
+ "value":"sh",
+ "boost":1.2
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json b/src/test/java/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json
new file mode 100644
index 0000000..112f8fb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json
@@ -0,0 +1,20 @@
+{
+ "filtered": {
+ "query": {
+ "term": {
+ "name.first": "shay"
+ }
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : {
+ "value" : "s.*y",
+ "flags" : "INTERSECTION|COMPLEMENT|EMPTY"
+ },
+ "_name":"test",
+ "_cache" : true,
+ "_cache_key" : "key"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/regexp-filter-flags.json b/src/test/java/org/elasticsearch/index/query/regexp-filter-flags.json
new file mode 100644
index 0000000..a5d7307
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/regexp-filter-flags.json
@@ -0,0 +1,18 @@
+{
+ "filtered": {
+ "query": {
+ "term": {
+ "name.first": "shay"
+ }
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : {
+ "value" : "s.*y",
+ "flags" : "INTERSECTION|COMPLEMENT|EMPTY"
+ },
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/regexp-filter-named.json b/src/test/java/org/elasticsearch/index/query/regexp-filter-named.json
new file mode 100644
index 0000000..ac96b3e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/regexp-filter-named.json
@@ -0,0 +1,15 @@
+{
+ "filtered": {
+ "query": {
+ "term": {
+ "name.first": "shay"
+ }
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : "s.*y",
+ "_name" : "test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/regexp-filter.json b/src/test/java/org/elasticsearch/index/query/regexp-filter.json
new file mode 100644
index 0000000..d7c7bfd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/regexp-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered": {
+ "query": {
+ "term": {
+ "name.first": "shay"
+ }
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : "s.*y"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/regexp.json b/src/test/java/org/elasticsearch/index/query/regexp.json
new file mode 100644
index 0000000..6c3d694
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/regexp.json
@@ -0,0 +1,5 @@
+{
+ "regexp":{
+ "name.first": "s.*y"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/simple-query-string.json b/src/test/java/org/elasticsearch/index/query/simple-query-string.json
new file mode 100644
index 0000000..9208e88
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/simple-query-string.json
@@ -0,0 +1,8 @@
+{
+ "simple_query_string": {
+ "query": "foo bar",
+ "analyzer": "keyword",
+ "fields": ["body^5","_all"],
+ "default_operator": "and"
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json b/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json
new file mode 100644
index 0000000..d9ca05b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json
@@ -0,0 +1,13 @@
+{
+ "span_multi":{
+ "match":{
+ "fuzzy":{
+ "age":{
+ "value":12,
+ "fuzziness":5,
+ "boost":2.0
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json b/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json
new file mode 100644
index 0000000..edb58e3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json
@@ -0,0 +1,12 @@
+{
+ "span_multi":{
+ "match":{
+ "fuzzy" : {
+ "user" : {
+ "value" : "ki",
+ "boost" : 1.08
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/span-multi-term-prefix.json b/src/test/java/org/elasticsearch/index/query/span-multi-term-prefix.json
new file mode 100644
index 0000000..62918d6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/span-multi-term-prefix.json
@@ -0,0 +1,7 @@
+{
+ "span_multi":{
+ "match":{
+ "prefix" : { "user" : { "value" : "ki", "boost" : 1.08 } }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/span-multi-term-range-numeric.json b/src/test/java/org/elasticsearch/index/query/span-multi-term-range-numeric.json
new file mode 100644
index 0000000..d9db8a4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/span-multi-term-range-numeric.json
@@ -0,0 +1,16 @@
+{
+ "span_multi":{
+ "match":{
+ "range" : {
+ "age" : {
+ "from" : 10,
+ "to" : 20,
+ "include_lower" : true,
+ "include_upper": false,
+ "boost" : 2.0
+ }
+ }
+ }
+ }
+}
+
diff --git a/src/test/java/org/elasticsearch/index/query/span-multi-term-range-term.json b/src/test/java/org/elasticsearch/index/query/span-multi-term-range-term.json
new file mode 100644
index 0000000..d9db8a4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/span-multi-term-range-term.json
@@ -0,0 +1,16 @@
+{
+ "span_multi":{
+ "match":{
+ "range" : {
+ "age" : {
+ "from" : 10,
+ "to" : 20,
+ "include_lower" : true,
+ "include_upper": false,
+ "boost" : 2.0
+ }
+ }
+ }
+ }
+}
+
diff --git a/src/test/java/org/elasticsearch/index/query/span-multi-term-wildcard.json b/src/test/java/org/elasticsearch/index/query/span-multi-term-wildcard.json
new file mode 100644
index 0000000..a2eaeb7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/span-multi-term-wildcard.json
@@ -0,0 +1,7 @@
+{
+ "span_multi":{
+ "match":{
+ "wildcard" : { "user" : {"value": "ki*y" , "boost" : 1.08}}
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/query/spanFieldMaskingTerm.json b/src/test/java/org/elasticsearch/index/query/spanFieldMaskingTerm.json
new file mode 100644
index 0000000..9849c10
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanFieldMaskingTerm.json
@@ -0,0 +1,29 @@
+{
+ span_near:{
+ clauses:[
+ {
+ span_term:{
+ age:34
+ }
+ },
+ {
+ span_term:{
+ age:35
+ }
+ },
+ {
+ field_masking_span:{
+ query:{
+ span_term:{
+ age_1 : 36
+ }
+ },
+ field:"age"
+ }
+ }
+ ],
+ slop:12,
+ in_order:false,
+ collect_payloads:false
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/spanFirst.json b/src/test/java/org/elasticsearch/index/query/spanFirst.json
new file mode 100644
index 0000000..9972c76
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanFirst.json
@@ -0,0 +1,10 @@
+{
+ span_first:{
+ match:{
+ span_term:{
+ age:34
+ }
+ },
+ end:12
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/spanNear.json b/src/test/java/org/elasticsearch/index/query/spanNear.json
new file mode 100644
index 0000000..ce17063
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanNear.json
@@ -0,0 +1,24 @@
+{
+ span_near:{
+ clauses:[
+ {
+ span_term:{
+ age:34
+ }
+ },
+ {
+ span_term:{
+ age:35
+ }
+ },
+ {
+ span_term:{
+ age:36
+ }
+ }
+ ],
+ slop:12,
+ in_order:false,
+ collect_payloads:false
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/spanNot.json b/src/test/java/org/elasticsearch/index/query/spanNot.json
new file mode 100644
index 0000000..c90de33
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanNot.json
@@ -0,0 +1,14 @@
+{
+ span_not:{
+ include:{
+ span_term:{
+ age:34
+ }
+ },
+ exclude:{
+ span_term:{
+ age:35
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/spanOr.json b/src/test/java/org/elasticsearch/index/query/spanOr.json
new file mode 100644
index 0000000..06c5262
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanOr.json
@@ -0,0 +1,21 @@
+{
+ span_or:{
+ clauses:[
+ {
+ span_term:{
+ age:34
+ }
+ },
+ {
+ span_term:{
+ age:35
+ }
+ },
+ {
+ span_term:{
+ age:36
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/spanOr2.json b/src/test/java/org/elasticsearch/index/query/spanOr2.json
new file mode 100644
index 0000000..b64ce1c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanOr2.json
@@ -0,0 +1,30 @@
+{
+ "span_or":{
+ "clauses":[
+ {
+ "span_term":{
+ "age":{
+ "value":34,
+ "boost":1.0
+ }
+ }
+ },
+ {
+ "span_term":{
+ "age":{
+ "value":35,
+ "boost":1.0
+ }
+ }
+ },
+ {
+ "span_term":{
+ "age":{
+ "value":36,
+ "boost":1.0
+ }
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/spanTerm.json b/src/test/java/org/elasticsearch/index/query/spanTerm.json
new file mode 100644
index 0000000..0186593
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/spanTerm.json
@@ -0,0 +1,5 @@
+{
+ span_term:{
+ age:34
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/starColonStar.json b/src/test/java/org/elasticsearch/index/query/starColonStar.json
new file mode 100644
index 0000000..c769ca0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/starColonStar.json
@@ -0,0 +1,5 @@
+{
+ "query_string": {
+ "query": "*:*"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/term-filter-named.json b/src/test/java/org/elasticsearch/index/query/term-filter-named.json
new file mode 100644
index 0000000..c23b7b3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/term-filter-named.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "term":{
+ "name.last":"banon",
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/term-filter.json b/src/test/java/org/elasticsearch/index/query/term-filter.json
new file mode 100644
index 0000000..11d2bfd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/term-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "term":{
+ "name.last":"banon"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/term-with-boost.json b/src/test/java/org/elasticsearch/index/query/term-with-boost.json
new file mode 100644
index 0000000..5f33cd5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/term-with-boost.json
@@ -0,0 +1,8 @@
+{
+ term:{
+ age:{
+ value:34,
+ boost:2.0
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/term.json b/src/test/java/org/elasticsearch/index/query/term.json
new file mode 100644
index 0000000..378cf42
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/term.json
@@ -0,0 +1,5 @@
+{
+ term:{
+ age:34
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/terms-filter-named.json b/src/test/java/org/elasticsearch/index/query/terms-filter-named.json
new file mode 100644
index 0000000..2cb8c7a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/terms-filter-named.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "terms":{
+ "name.last":["banon", "kimchy"],
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/terms-filter.json b/src/test/java/org/elasticsearch/index/query/terms-filter.json
new file mode 100644
index 0000000..04a8d26
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/terms-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "terms":{
+ "name.last":["banon", "kimchy"]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/terms-query.json b/src/test/java/org/elasticsearch/index/query/terms-query.json
new file mode 100644
index 0000000..a3e0d08
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/terms-query.json
@@ -0,0 +1,5 @@
+{
+ "terms":{
+ "name.first":["shay", "test"]
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/wildcard-boost.json b/src/test/java/org/elasticsearch/index/query/wildcard-boost.json
new file mode 100644
index 0000000..53c8d82
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/wildcard-boost.json
@@ -0,0 +1,8 @@
+{
+ "wildcard":{
+ "name.first":{
+ "value":"sh*",
+ "boost":1.2
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/wildcard.json b/src/test/java/org/elasticsearch/index/query/wildcard.json
new file mode 100644
index 0000000..c8ed852
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/query/wildcard.json
@@ -0,0 +1,5 @@
+{
+ wildcard:{
+ "name.first":"sh*"
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/search/FieldDataTermsFilterTests.java b/src/test/java/org/elasticsearch/index/search/FieldDataTermsFilterTests.java
new file mode 100644
index 0000000..1bb9631
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/FieldDataTermsFilterTests.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search;
+
+import com.carrotsearch.hppc.DoubleOpenHashSet;
+import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.ObjectOpenHashSet;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.Mapper;
+import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+/**
+ * Tests for {@link FieldDataTermsFilter} with bytes, long, and double terms,
+ * including type mismatches and empty term sets.
+ */
+public class FieldDataTermsFilterTests extends ElasticsearchTestCase {
+
+ protected IndexFieldDataService ifdService;
+ protected IndexWriter writer;
+ protected AtomicReader reader;
+ protected StringFieldMapper strMapper;
+ protected LongFieldMapper lngMapper;
+ protected DoubleFieldMapper dblMapper;
+
+ @Before
+ public void setup() throws Exception {
+ super.setUp();
+
+ // setup field mappers
+ strMapper = new StringFieldMapper.Builder("str_value")
+ .build(new Mapper.BuilderContext(null, new ContentPath(1)));
+
+ lngMapper = new LongFieldMapper.Builder("lng_value")
+ .build(new Mapper.BuilderContext(null, new ContentPath(1)));
+
+ dblMapper = new DoubleFieldMapper.Builder("dbl_value")
+ .build(new Mapper.BuilderContext(null, new ContentPath(1)));
+
+ // create index and fielddata service
+ ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
+ writer = new IndexWriter(new RAMDirectory(),
+ new IndexWriterConfig(Lucene.VERSION, new StandardAnalyzer(Lucene.VERSION)));
+
+ int numDocs = 10;
+ for (int i = 0; i < numDocs; i++) {
+ Document d = new Document();
+ d.add(new StringField(strMapper.names().indexName(), "str" + i, Field.Store.NO));
+ d.add(new LongField(lngMapper.names().indexName(), i, Field.Store.NO));
+ d.add(new DoubleField(dblMapper.names().indexName(), Double.valueOf(i), Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(writer, true));
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ reader.close();
+ writer.close();
+ ifdService.clear();
+ SearchContext.removeCurrent();
+ }
+
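+ // Typed helpers so the tests can fetch plain and numeric field data for a mapper.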
+ protected <IFD extends IndexFieldData> IFD getFieldData(FieldMapper fieldMapper) {
+ return ifdService.getForField(fieldMapper);
+ }
+
+ protected <IFD extends IndexNumericFieldData> IFD getFieldData(NumberFieldMapper fieldMapper) {
+ return ifdService.getForField(fieldMapper);
+ }
+
+ @Test
+ public void testBytes() throws Exception {
+ List<Integer> docs = Arrays.asList(1, 5, 7);
+
+ ObjectOpenHashSet<BytesRef> hTerms = new ObjectOpenHashSet<BytesRef>();
+ List<BytesRef> cTerms = new ArrayList<BytesRef>(docs.size());
+ for (int i = 0; i < docs.size(); i++) {
+ BytesRef term = new BytesRef("str" + docs.get(i));
+ hTerms.add(term);
+ cTerms.add(term);
+ }
+
+ FieldDataTermsFilter hFilter = FieldDataTermsFilter.newBytes(getFieldData(strMapper), hTerms);
+
+ int size = reader.maxDoc();
+ FixedBitSet result = new FixedBitSet(size);
+
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+ result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(docs.size()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(result.get(i), equalTo(docs.contains(i)));
+ }
+
+ // filter from mapper
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+ result.or(strMapper.termsFilter(ifdService, cTerms, null)
+ .getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(docs.size()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(result.get(i), equalTo(docs.contains(i)));
+ }
+
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+
+ // filter the long field with BytesRef terms
+ // should not match any docs
+ hFilter = FieldDataTermsFilter.newBytes(getFieldData(lngMapper), hTerms);
+ result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(0));
+
+ // filter the double field with BytesRef terms
+ // should not match any docs
+ hFilter = FieldDataTermsFilter.newBytes(getFieldData(dblMapper), hTerms);
+ result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(0));
+ }
+
+ @Test
+ public void testLongs() throws Exception {
+ List<Integer> docs = Arrays.asList(1, 5, 7);
+
+ LongOpenHashSet hTerms = new LongOpenHashSet();
+ List<Long> cTerms = new ArrayList<Long>(docs.size());
+ for (int i = 0; i < docs.size(); i++) {
+ long term = docs.get(i).longValue();
+ hTerms.add(term);
+ cTerms.add(term);
+ }
+
+ FieldDataTermsFilter hFilter = FieldDataTermsFilter.newLongs(getFieldData(lngMapper), hTerms);
+
+ int size = reader.maxDoc();
+ FixedBitSet result = new FixedBitSet(size);
+
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+ result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(docs.size()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(result.get(i), equalTo(docs.contains(i)));
+ }
+
+ // filter from mapper
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+ result.or(lngMapper.termsFilter(ifdService, cTerms, null)
+ .getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(docs.size()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(result.get(i), equalTo(docs.contains(i)));
+ }
+
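+ // Long terms against the double field are a type mismatch, so no DocIdSet is produced.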
+ hFilter = FieldDataTermsFilter.newLongs(getFieldData(dblMapper), hTerms);
+ assertNull(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
+ }
+
+ @Test
+ public void testDoubles() throws Exception {
+ List<Integer> docs = Arrays.asList(1, 5, 7);
+
+ DoubleOpenHashSet hTerms = new DoubleOpenHashSet();
+ List<Double> cTerms = new ArrayList<Double>(docs.size());
+ for (int i = 0; i < docs.size(); i++) {
+ double term = Double.valueOf(docs.get(i));
+ hTerms.add(term);
+ cTerms.add(term);
+ }
+
+ FieldDataTermsFilter hFilter = FieldDataTermsFilter.newDoubles(getFieldData(dblMapper), hTerms);
+
+ int size = reader.maxDoc();
+ FixedBitSet result = new FixedBitSet(size);
+
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+ result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(docs.size()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(result.get(i), equalTo(docs.contains(i)));
+ }
+
+ // filter from mapper
+ result.clear(0, size);
+ assertThat(result.cardinality(), equalTo(0));
+ result.or(dblMapper.termsFilter(ifdService, cTerms, null)
+ .getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
+ assertThat(result.cardinality(), equalTo(docs.size()));
+ for (int i = 0; i < reader.maxDoc(); i++) {
+ assertThat(result.get(i), equalTo(docs.contains(i)));
+ }
+
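+ // Double terms against the long field are a type mismatch, so no DocIdSet is produced.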
+ hFilter = FieldDataTermsFilter.newDoubles(getFieldData(lngMapper), hTerms);
+ assertNull(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
+ }
+
+ @Test
+ public void testNoTerms() throws Exception {
+ FieldDataTermsFilter hFilterBytes = FieldDataTermsFilter.newBytes(getFieldData(strMapper), new ObjectOpenHashSet<BytesRef>());
+ FieldDataTermsFilter hFilterLongs = FieldDataTermsFilter.newLongs(getFieldData(lngMapper), new LongOpenHashSet());
+ FieldDataTermsFilter hFilterDoubles = FieldDataTermsFilter.newDoubles(getFieldData(dblMapper), new DoubleOpenHashSet());
+ assertNull(hFilterBytes.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
+ assertNull(hFilterLongs.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
+ assertNull(hFilterDoubles.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/search/child/BitSetCollector.java b/src/test/java/org/elasticsearch/index/search/child/BitSetCollector.java
new file mode 100644
index 0000000..ee4bea4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/BitSetCollector.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.search.NoopCollector;
+
+import java.io.IOException;
+
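+/**
+ * Collects every matching doc id into a {@link FixedBitSet} so tests can
+ * compare the actual hits against an expected bit set.
+ */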
+class BitSetCollector extends NoopCollector {
+
+ final FixedBitSet result;
+ int docBase;
+
+ BitSetCollector(int topLevelMaxDoc) {
+ this.result = new FixedBitSet(topLevelMaxDoc);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ result.set(docBase + doc);
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext context) throws IOException {
+ docBase = context.docBase;
+ }
+
+ FixedBitSet getResult() {
+ return result;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java
new file mode 100644
index 0000000..04152f8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java
@@ -0,0 +1,347 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.IntOpenHashSet;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.compress.CompressedString;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache;
+import org.elasticsearch.index.cache.id.IdCache;
+import org.elasticsearch.index.cache.id.SimpleIdCacheTests;
+import org.elasticsearch.index.cache.id.simple.SimpleIdCache;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MapperTestUtils;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.hamcrest.Description;
+import org.hamcrest.StringDescription;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ChildrenConstantScoreQueryTests extends ElasticsearchLuceneTestCase {
+
+ @BeforeClass
+ public static void before() throws IOException {
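+ // Pin the default codec instead of the randomly selected test codec.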
+ forceDefaultCodec();
+ SearchContext.setCurrent(createSearchContext("test", "parent", "child"));
+ }
+
+ @AfterClass
+ public static void after() throws IOException {
+ SearchContext.removeCurrent();
+ }
+
+ @Test
+ public void testSimple() throws Exception {
+ Directory directory = newDirectory();
+ RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+
+ for (int parent = 1; parent <= 5; parent++) {
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", Integer.toString(parent)), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ indexWriter.addDocument(document);
+
+ for (int child = 1; child <= 3; child++) {
+ document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", Integer.toString(parent * 3 + child)), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", Integer.toString(parent)), Field.Store.NO));
+ document.add(new StringField("field1", "value" + child, Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+ }
+
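+ // Five parents with three children each; a query on any single child value
+ // should match all five parents.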
+ IndexReader indexReader = DirectoryReader.open(indexWriter.w, false);
+ IndexSearcher searcher = new IndexSearcher(indexReader);
+
+ TermQuery childQuery = new TermQuery(new Term("field1", "value" + (1 + random().nextInt(3))));
+ TermFilter parentFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "parent"));
+ int shortCircuitParentDocSet = random().nextInt(5);
+ ChildrenConstantScoreQuery query = new ChildrenConstantScoreQuery(childQuery, "parent", "child", parentFilter, shortCircuitParentDocSet, null);
+
+ BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+ searcher.search(query, collector);
+ FixedBitSet actualResult = collector.getResult();
+
+ assertThat(actualResult.cardinality(), equalTo(5));
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+ @Test
+ public void testRandom() throws Exception {
+ Directory directory = newDirectory();
+ RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+ int numUniqueChildValues = 1 + random().nextInt(TEST_NIGHTLY ? 10000 : 1000);
+ String[] childValues = new String[numUniqueChildValues];
+ for (int i = 0; i < numUniqueChildValues; i++) {
+ childValues[i] = Integer.toString(i);
+ }
+
+ IntOpenHashSet filteredOrDeletedDocs = new IntOpenHashSet();
+ int childDocId = 0;
+ int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
+ ObjectObjectOpenHashMap<String, NavigableSet<String>> childValueToParentIds = new ObjectObjectOpenHashMap<String, NavigableSet<String>>();
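+ // Tracks, per child value, the parent ids that are expected to match it.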
+ for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
+ boolean markParentAsDeleted = rarely();
+ boolean filterMe = rarely();
+ String parent = Integer.toString(parentDocId);
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ if (markParentAsDeleted) {
+ filteredOrDeletedDocs.add(parentDocId);
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ if (filterMe) {
+ filteredOrDeletedDocs.add(parentDocId);
+ document.add(new StringField("filter", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ int numChildDocs;
+ if (rarely()) {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
+ } else {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 40 : 10);
+ }
+ for (int i = 0; i < numChildDocs; i++) {
+ boolean markChildAsDeleted = rarely();
+ String childValue = childValues[random().nextInt(childValues.length)];
+
+ document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", Integer.toString(childDocId)), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ document.add(new StringField("field1", childValue, Field.Store.NO));
+ if (markChildAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ if (!markChildAsDeleted) {
+ NavigableSet<String> parentIds;
+ if (childValueToParentIds.containsKey(childValue)) {
+ parentIds = childValueToParentIds.lget();
+ } else {
+ childValueToParentIds.put(childValue, parentIds = new TreeSet<String>());
+ }
+ if (!markParentAsDeleted && !filterMe) {
+ parentIds.add(parent);
+ }
+ }
+ }
+ }
+
+ // Delete docs that are marked to be deleted.
+ indexWriter.deleteDocuments(new Term("delete", "me"));
+
+ indexWriter.commit();
+ IndexReader indexReader = DirectoryReader.open(directory);
+ IndexSearcher searcher = new IndexSearcher(indexReader);
+ Engine.Searcher engineSearcher = new Engine.SimpleSearcher(
+ ChildrenConstantScoreQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+
+ Filter rawParentFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "parent"));
+ Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
+ int max = numUniqueChildValues / 4;
+ for (int i = 0; i < max; i++) {
+ // Randomly pick a cached version: there is specific logic inside ChildrenQuery that deals with the fact
+ // that deletes are applied at the top level when filters are cached.
+ Filter parentFilter;
+ if (random().nextBoolean()) {
+ parentFilter = SearchContext.current().filterCache().cache(rawParentFilter);
+ } else {
+ parentFilter = rawParentFilter;
+ }
+
+ // Using this in the XFilteredQuery below exercises Scorer#advance(..) and keeps the Weight#scorer from getting live docs as acceptedDocs
+ Filter filterMe;
+ if (random().nextBoolean()) {
+ filterMe = SearchContext.current().filterCache().cache(rawFilterMe);
+ } else {
+ filterMe = rawFilterMe;
+ }
+
+ // Simulate a parent update
+ if (random().nextBoolean()) {
+ int numberOfUpdates = 1 + random().nextInt(TEST_NIGHTLY ? 25 : 5);
+ for (int j = 0; j < numberOfUpdates; j++) {
+ int parentId;
+ do {
+ parentId = random().nextInt(numParentDocs);
+ } while (filteredOrDeletedDocs.contains(parentId));
+
+ String parentUid = Uid.createUid("parent", Integer.toString(parentId));
+ indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, parentUid));
+
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, parentUid, Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+
+ indexReader.close();
+ indexReader = DirectoryReader.open(indexWriter.w, true);
+ searcher = new IndexSearcher(indexReader);
+ engineSearcher = new Engine.SimpleSearcher(
+ ChildrenConstantScoreQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+ }
+
+ String childValue = childValues[random().nextInt(numUniqueChildValues)];
+ TermQuery childQuery = new TermQuery(new Term("field1", childValue));
+ int shortCircuitParentDocSet = random().nextInt(numParentDocs);
+ Filter nonNestedDocsFilter = random().nextBoolean() ? NonNestedDocsFilter.INSTANCE : null;
+ Query query;
+ if (random().nextBoolean()) {
+ // Usage in HasChildQueryParser
+ query = new ChildrenConstantScoreQuery(childQuery, "parent", "child", parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
+ } else {
+ // Usage in HasChildFilterParser
+ query = new XConstantScoreQuery(
+ new CustomQueryWrappingFilter(
+ new ChildrenConstantScoreQuery(childQuery, "parent", "child", parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter)
+ )
+ );
+ }
+ query = new XFilteredQuery(query, filterMe);
+ BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+ searcher.search(query, collector);
+ FixedBitSet actualResult = collector.getResult();
+
+ FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
+ if (childValueToParentIds.containsKey(childValue)) {
+ AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
+ Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
+ if (terms != null) {
+ NavigableSet<String> parentIds = childValueToParentIds.lget();
+ TermsEnum termsEnum = terms.iterator(null);
+ DocsEnum docsEnum = null;
+ for (String id : parentIds) {
+ TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("parent", id));
+ if (seekStatus == TermsEnum.SeekStatus.FOUND) {
+ docsEnum = termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
+ expectedResult.set(docsEnum.nextDoc());
+ } else if (seekStatus == TermsEnum.SeekStatus.END) {
+ break;
+ }
+ }
+ }
+ }
+
+ assertBitSet(actualResult, expectedResult, searcher);
+ }
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+ static void assertBitSet(FixedBitSet actual, FixedBitSet expected, IndexSearcher searcher) throws IOException {
+ if (!actual.equals(expected)) {
+ Description description = new StringDescription();
+ description.appendText(reason(actual, expected, searcher));
+ description.appendText("\nExpected: ");
+ description.appendValue(expected);
+ description.appendText("\n got: ");
+ description.appendValue(actual);
+ description.appendText("\n");
+ throw new java.lang.AssertionError(description.toString());
+ }
+ }
+
+ static String reason(FixedBitSet actual, FixedBitSet expected, IndexSearcher indexSearcher) throws IOException {
+ StringBuilder builder = new StringBuilder();
+ builder.append("expected cardinality:").append(expected.cardinality()).append('\n');
+ DocIdSetIterator iterator = expected.iterator();
+ for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
+ builder.append("Expected doc[").append(doc).append("] with id value ").append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
+ }
+ builder.append("actual cardinality: ").append(actual.cardinality()).append('\n');
+ iterator = actual.iterator();
+ for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
+ builder.append("Actual doc[").append(doc).append("] with id value ").append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
+ }
+ return builder.toString();
+ }
+
+ static SearchContext createSearchContext(String indexName, String parentType, String childType) throws IOException {
+ final Index index = new Index(indexName);
+ final IdCache idCache = new SimpleIdCache(index, ImmutableSettings.EMPTY);
+ final CacheRecycler cacheRecycler = new CacheRecycler(ImmutableSettings.EMPTY);
+ final PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(ImmutableSettings.EMPTY, new ThreadPool());
+ Settings settings = ImmutableSettings.EMPTY;
+ MapperService mapperService = MapperTestUtils.newMapperService(index, settings);
+ final IndexService indexService = new SimpleIdCacheTests.StubIndexService(mapperService);
+ idCache.setIndexService(indexService);
+ // The id cache is now registered as a document type listener, so we can add mappings.
+ mapperService.merge(
+ childType, new CompressedString(PutMappingRequest.buildFromSimplifiedDef(childType, "_parent", "type=" + parentType).string()), true
+ );
+
+ ThreadPool threadPool = new ThreadPool();
+ NodeSettingsService nodeSettingsService = new NodeSettingsService(settings);
+ IndicesFilterCache indicesFilterCache = new IndicesFilterCache(settings, threadPool, cacheRecycler, nodeSettingsService);
+ WeightedFilterCache filterCache = new WeightedFilterCache(index, settings, indicesFilterCache);
+ return new TestSearchContext(cacheRecycler, pageCacheRecycler, idCache, indexService, filterCache);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java
new file mode 100644
index 0000000..06a5db6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.FloatArrayList;
+import com.carrotsearch.hppc.IntOpenHashSet;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import static org.elasticsearch.index.search.child.ChildrenConstantScoreQueryTests.assertBitSet;
+import static org.elasticsearch.index.search.child.ChildrenConstantScoreQueryTests.createSearchContext;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ChildrenQueryTests extends ElasticsearchLuceneTestCase {
+
+ @BeforeClass
+ public static void before() throws IOException {
+ forceDefaultCodec();
+ SearchContext.setCurrent(createSearchContext("test", "parent", "child"));
+ }
+
+ @AfterClass
+ public static void after() throws IOException {
+ SearchContext.removeCurrent();
+ }
+
+ @Test
+ public void testRandom() throws Exception {
+ Directory directory = newDirectory();
+ RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+ int numUniqueChildValues = 1 + random().nextInt(TEST_NIGHTLY ? 6000 : 600);
+ String[] childValues = new String[numUniqueChildValues];
+ for (int i = 0; i < numUniqueChildValues; i++) {
+ childValues[i] = Integer.toString(i);
+ }
+
+ IntOpenHashSet filteredOrDeletedDocs = new IntOpenHashSet();
+
+ int childDocId = 0;
+ int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
+ ObjectObjectOpenHashMap<String, NavigableMap<String, FloatArrayList>> childValueToParentIds = new ObjectObjectOpenHashMap<String, NavigableMap<String, FloatArrayList>>();
+ for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
+ boolean markParentAsDeleted = rarely();
+ boolean filterMe = rarely();
+ String parent = Integer.toString(parentDocId);
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ if (markParentAsDeleted) {
+ filteredOrDeletedDocs.add(parentDocId);
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ if (filterMe) {
+ filteredOrDeletedDocs.add(parentDocId);
+ document.add(new StringField("filter", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ int numChildDocs;
+ if (rarely()) {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
+ } else {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 40 : 10);
+ }
+ for (int i = 0; i < numChildDocs; i++) {
+ boolean markChildAsDeleted = rarely();
+ String childValue = childValues[random().nextInt(childValues.length)];
+
+ document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", Integer.toString(childDocId)), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ document.add(new StringField("field1", childValue, Field.Store.NO));
+ if (markChildAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ if (!markChildAsDeleted) {
+ NavigableMap<String, FloatArrayList> parentIdToChildScores;
+ if (childValueToParentIds.containsKey(childValue)) {
+ parentIdToChildScores = childValueToParentIds.lget();
+ } else {
+ childValueToParentIds.put(childValue, parentIdToChildScores = new TreeMap<String, FloatArrayList>());
+ }
+ if (!markParentAsDeleted && !filterMe) {
+ FloatArrayList childScores = parentIdToChildScores.get(parent);
+ if (childScores == null) {
+ parentIdToChildScores.put(parent, childScores = new FloatArrayList());
+ }
+ childScores.add(1f);
+ }
+ }
+ }
+ }
+
+ // Delete docs that are marked to be deleted.
+ indexWriter.deleteDocuments(new Term("delete", "me"));
+ indexWriter.commit();
+
+ IndexReader indexReader = DirectoryReader.open(directory);
+ IndexSearcher searcher = new IndexSearcher(indexReader);
+ Engine.Searcher engineSearcher = new Engine.SimpleSearcher(
+ ChildrenQueryTests.class.getSimpleName(), searcher
+ );
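+ // Both the test and the queries resolve the filter cache through the
+ // thread-local SearchContext, so the searcher is installed on it as well.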
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+ Filter rawParentFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "parent"));
+ Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
+ int max = numUniqueChildValues / 4;
+ for (int i = 0; i < max; i++) {
+ // Randomly pick a cached version: there is specific logic inside ChildrenQuery that deals with the fact
+ // that deletes are applied at the top level when filters are cached.
+ Filter parentFilter;
+ if (random().nextBoolean()) {
+ parentFilter = SearchContext.current().filterCache().cache(rawParentFilter);
+ } else {
+ parentFilter = rawParentFilter;
+ }
+
+ // Wrapping the query in a FilteredQuery exercises Scorer#advance(..) and also keeps
+ // Weight#scorer from receiving live docs as acceptedDocs.
+ Filter filterMe;
+ if (random().nextBoolean()) {
+ filterMe = SearchContext.current().filterCache().cache(rawFilterMe);
+ } else {
+ filterMe = rawFilterMe;
+ }
+
+ // Simulate a parent update
+ if (random().nextBoolean()) {
+ int numberOfUpdates = 1 + random().nextInt(TEST_NIGHTLY ? 25 : 5);
+ for (int j = 0; j < numberOfUpdates; j++) {
+ int parentId;
+ do {
+ parentId = random().nextInt(numParentDocs);
+ } while (filteredOrDeletedDocs.contains(parentId));
+
+ String parentUid = Uid.createUid("parent", Integer.toString(parentId));
+ indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, parentUid));
+
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, parentUid, Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+
+ indexReader.close();
+ indexReader = DirectoryReader.open(indexWriter.w, true);
+ searcher = new IndexSearcher(indexReader);
+ engineSearcher = new Engine.SimpleSearcher(
+ ChildrenQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+ }
+
+ String childValue = childValues[random().nextInt(numUniqueChildValues)];
+ Query childQuery = new ConstantScoreQuery(new TermQuery(new Term("field1", childValue)));
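+ // Random short-circuit threshold: when few enough parent ids match, ChildrenQuery
+ // can switch to a direct parent-id filter instead of the full parent filter.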
+ int shortCircuitParentDocSet = random().nextInt(numParentDocs);
+ ScoreType scoreType = ScoreType.values()[random().nextInt(ScoreType.values().length)];
+ Filter nonNestedDocsFilter = random().nextBoolean() ? NonNestedDocsFilter.INSTANCE : null;
+ Query query = new ChildrenQuery("parent", "child", parentFilter, childQuery, scoreType, shortCircuitParentDocSet, nonNestedDocsFilter);
+ query = new XFilteredQuery(query, filterMe);
+ BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+ int numHits = 1 + random().nextInt(25);
+ TopScoreDocCollector actualTopDocsCollector = TopScoreDocCollector.create(numHits, false);
+ searcher.search(query, MultiCollector.wrap(collector, actualTopDocsCollector));
+ FixedBitSet actualResult = collector.getResult();
+
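+ // Compute the expected result by brute force: look up each recorded parent id
+ // in the _uid terms and mark the live doc it resolves to.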
+ FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
+ MockScorer mockScorer = new MockScorer(scoreType);
+ TopScoreDocCollector expectedTopDocsCollector = TopScoreDocCollector.create(numHits, false);
+ expectedTopDocsCollector.setScorer(mockScorer);
+ if (childValueToParentIds.containsKey(childValue)) {
+ AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
+ Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
+ if (terms != null) {
+ NavigableMap<String, FloatArrayList> parentIdToChildScores = childValueToParentIds.lget();
+ TermsEnum termsEnum = terms.iterator(null);
+ DocsEnum docsEnum = null;
+ for (Map.Entry<String, FloatArrayList> entry : parentIdToChildScores.entrySet()) {
+ TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("parent", entry.getKey()));
+ if (seekStatus == TermsEnum.SeekStatus.FOUND) {
+ docsEnum = termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
+ expectedResult.set(docsEnum.nextDoc());
+ mockScorer.scores = entry.getValue();
+ expectedTopDocsCollector.collect(docsEnum.docID());
+ } else if (seekStatus == TermsEnum.SeekStatus.END) {
+ break;
+ }
+ }
+ }
+ }
+
+ assertBitSet(actualResult, expectedResult, searcher);
+ assertTopDocs(actualTopDocsCollector.topDocs(), expectedTopDocsCollector.topDocs());
+ }
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+ static void assertTopDocs(TopDocs actual, TopDocs expected) {
+ assertThat("actual.totalHits != expected.totalHits", actual.totalHits, equalTo(expected.totalHits));
+ assertThat("actual.getMaxScore() != expected.getMaxScore()", actual.getMaxScore(), equalTo(expected.getMaxScore()));
+ assertThat("actual.scoreDocs.length != expected.scoreDocs.length", actual.scoreDocs.length, equalTo(actual.scoreDocs.length));
+ for (int i = 0; i < actual.scoreDocs.length; i++) {
+ ScoreDoc actualHit = actual.scoreDocs[i];
+ ScoreDoc expectedHit = expected.scoreDocs[i];
+ assertThat("actualHit.doc != expectedHit.doc", actualHit.doc, equalTo(expectedHit.doc));
+ assertThat("actualHit.score != expectedHit.score", actualHit.score, equalTo(expectedHit.score));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/child/MockScorer.java b/src/test/java/org/elasticsearch/index/search/child/MockScorer.java
new file mode 100644
index 0000000..7fe3fd5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/MockScorer.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.FloatArrayList;
+import org.apache.lucene.search.Scorer;
+
+import java.io.IOException;
+
+class MockScorer extends Scorer {
+
+ final ScoreType scoreType;
+ FloatArrayList scores;
+
+ MockScorer(ScoreType scoreType) {
+ super(null);
+ this.scoreType = scoreType;
+ }
+
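+ // Aggregates the recorded child scores according to the ScoreType: MAX keeps the
+ // highest score, SUM adds them up, and AVG sums and then divides by the count.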
+ @Override
+ public float score() throws IOException {
+ float aggregateScore = 0;
+ for (int i = 0; i < scores.elementsCount; i++) {
+ float score = scores.buffer[i];
+ switch (scoreType) {
+ case MAX:
+ if (aggregateScore < score) {
+ aggregateScore = score;
+ }
+ break;
+ case SUM:
+ case AVG:
+ aggregateScore += score;
+ break;
+ }
+ }
+
+ if (scoreType == ScoreType.AVG) {
+ aggregateScore /= scores.elementsCount;
+ }
+
+ return aggregateScore;
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public int docID() {
+ return 0;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ return 0;
+ }
+
+ @Override
+ public long cost() {
+ return 0;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java
new file mode 100644
index 0000000..5907113
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.IntIntOpenHashMap;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+
+import static org.elasticsearch.index.search.child.ChildrenConstantScoreQueryTests.assertBitSet;
+import static org.elasticsearch.index.search.child.ChildrenConstantScoreQueryTests.createSearchContext;
+
+/**
+ * Tests {@link ParentConstantScoreQuery} against a brute-force computed expectation.
+ */
+public class ParentConstantScoreQueryTests extends ElasticsearchLuceneTestCase {
+
+ @BeforeClass
+ public static void before() throws IOException {
+ forceDefaultCodec();
+ SearchContext.setCurrent(createSearchContext("test", "parent", "child"));
+ }
+
+ @AfterClass
+ public static void after() throws IOException {
+ SearchContext.removeCurrent();
+ }
+
+ @Test
+ public void testRandom() throws Exception {
+ Directory directory = newDirectory();
+ RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+ int numUniqueParentValues = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
+ String[] parentValues = new String[numUniqueParentValues];
+ for (int i = 0; i < numUniqueParentValues; i++) {
+ parentValues[i] = Integer.toString(i);
+ }
+
+ int childDocId = 0;
+ int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 10000 : 1000);
+ ObjectObjectOpenHashMap<String, NavigableSet<String>> parentValueToChildDocIds = new ObjectObjectOpenHashMap<String, NavigableSet<String>>();
+ IntIntOpenHashMap childIdToParentId = new IntIntOpenHashMap();
+ for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
+ boolean markParentAsDeleted = rarely();
+ String parentValue = parentValues[random().nextInt(parentValues.length)];
+ String parent = Integer.toString(parentDocId);
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ document.add(new StringField("field1", parentValue, Field.Store.NO));
+ if (markParentAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ int numChildDocs;
+ if (rarely()) {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
+ } else {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 40 : 10);
+ }
+ if (parentDocId == numParentDocs - 1 && childIdToParentId.isEmpty()) {
+ // ensure there is at least one child in the index
+ numChildDocs = Math.max(1, numChildDocs);
+ }
+ for (int i = 0; i < numChildDocs; i++) {
+ boolean markChildAsDeleted = rarely();
+ boolean filterMe = rarely();
+ String child = Integer.toString(childDocId++);
+
+ document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", child), Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ if (markChildAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ if (filterMe) {
+ document.add(new StringField("filter", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ if (!markParentAsDeleted) {
+ NavigableSet<String> childIds;
+ if (parentValueToChildDocIds.containsKey(parentValue)) {
+ childIds = parentValueToChildDocIds.lget();
+ } else {
+ parentValueToChildDocIds.put(parentValue, childIds = new TreeSet<String>());
+ }
+ if (!markChildAsDeleted && !filterMe) {
+ childIdToParentId.put(Integer.valueOf(child), parentDocId);
+ childIds.add(child);
+ }
+ }
+ }
+ }
+
+ // Delete docs that are marked to be deleted.
+ indexWriter.deleteDocuments(new Term("delete", "me"));
+ indexWriter.commit();
+
+ IndexReader indexReader = DirectoryReader.open(directory);
+ IndexSearcher searcher = new IndexSearcher(indexReader);
+ Engine.Searcher engineSearcher = new Engine.SimpleSearcher(
+ ParentConstantScoreQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+
+ TermFilter rawChildrenFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "child"));
+ Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
+ int max = numUniqueParentValues / 4;
+ for (int i = 0; i < max; i++) {
+ // Randomly pick a cached version: there is specific logic inside the parent/child queries that deals
+ // with the fact that deletes are applied at the top level when filters are cached.
+ Filter childrenFilter;
+ if (random().nextBoolean()) {
+ childrenFilter = SearchContext.current().filterCache().cache(rawChildrenFilter);
+ } else {
+ childrenFilter = rawChildrenFilter;
+ }
+
+ // Wrapping the query in a FilteredQuery exercises Scorer#advance(..) and also keeps
+ // Weight#scorer from receiving live docs as acceptedDocs.
+ Filter filterMe;
+ if (random().nextBoolean()) {
+ filterMe = SearchContext.current().filterCache().cache(rawFilterMe);
+ } else {
+ filterMe = rawFilterMe;
+ }
+
+ // Simulate a child update
+ if (random().nextBoolean()) {
+ int numberOfUpdates = 1 + random().nextInt(TEST_NIGHTLY ? 25 : 5);
+ int[] childIds = childIdToParentId.keys().toArray();
+ for (int j = 0; j < numberOfUpdates; j++) {
+ int childId = childIds[random().nextInt(childIds.length)];
+ String childUid = Uid.createUid("child", Integer.toString(childId));
+ indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, childUid));
+
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, childUid, Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ String parentUid = Uid.createUid("parent", Integer.toString(childIdToParentId.get(childId)));
+ document.add(new StringField(ParentFieldMapper.NAME, parentUid, Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+
+ indexReader.close();
+ indexReader = DirectoryReader.open(indexWriter.w, true);
+ searcher = new IndexSearcher(indexReader);
+ engineSearcher = new Engine.SimpleSearcher(
+ ParentConstantScoreQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+ }
+
+ String parentValue = parentValues[random().nextInt(numUniqueParentValues)];
+ TermQuery parentQuery = new TermQuery(new Term("field1", parentValue));
+ Query query;
+ if (random().nextBoolean()) {
+ // Usage in HasParentQueryParser
+ query = new ParentConstantScoreQuery(parentQuery, "parent", childrenFilter);
+ } else {
+ // Usage in HasParentFilterParser
+ query = new XConstantScoreQuery(
+ new CustomQueryWrappingFilter(
+ new ParentConstantScoreQuery(parentQuery, "parent", childrenFilter)
+ )
+ );
+ }
+ query = new XFilteredQuery(query, filterMe);
+ BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+ searcher.search(query, collector);
+ FixedBitSet actualResult = collector.getResult();
+
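+ // Expected result computed by brute force from the child ids recorded while indexing.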
+ FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
+ if (parentValueToChildDocIds.containsKey(parentValue)) {
+ AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
+ Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
+ if (terms != null) {
+ NavigableSet<String> childIds = parentValueToChildDocIds.lget();
+ TermsEnum termsEnum = terms.iterator(null);
+ DocsEnum docsEnum = null;
+ for (String id : childIds) {
+ TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("child", id));
+ if (seekStatus == TermsEnum.SeekStatus.FOUND) {
+ docsEnum = termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
+ expectedResult.set(docsEnum.nextDoc());
+ } else if (seekStatus == TermsEnum.SeekStatus.END) {
+ break;
+ }
+ }
+ }
+ }
+
+ assertBitSet(actualResult, expectedResult, searcher);
+ }
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java
new file mode 100644
index 0000000..4fb1b8e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java
@@ -0,0 +1,241 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.FloatArrayList;
+import com.carrotsearch.hppc.IntIntOpenHashMap;
+import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchLuceneTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import static org.elasticsearch.index.search.child.ChildrenConstantScoreQueryTests.assertBitSet;
+import static org.elasticsearch.index.search.child.ChildrenConstantScoreQueryTests.createSearchContext;
+import static org.elasticsearch.index.search.child.ChildrenQueryTests.assertTopDocs;
+
+public class ParentQueryTests extends ElasticsearchLuceneTestCase {
+
+ @BeforeClass
+ public static void before() throws IOException {
+ forceDefaultCodec();
+ SearchContext.setCurrent(createSearchContext("test", "parent", "child"));
+ }
+
+ @AfterClass
+ public static void after() throws IOException {
+ SearchContext.removeCurrent();
+ }
+
+ @Test
+ public void testRandom() throws Exception {
+ Directory directory = newDirectory();
+ RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+ int numUniqueParentValues = 1 + random().nextInt(TEST_NIGHTLY ? 6000 : 600);
+ String[] parentValues = new String[numUniqueParentValues];
+ for (int i = 0; i < numUniqueParentValues; i++) {
+ parentValues[i] = Integer.toString(i);
+ }
+
+ int childDocId = 0;
+ int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
+ ObjectObjectOpenHashMap<String, NavigableMap<String, Float>> parentValueToChildIds = new ObjectObjectOpenHashMap<String, NavigableMap<String, Float>>();
+ IntIntOpenHashMap childIdToParentId = new IntIntOpenHashMap();
+ for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
+ boolean markParentAsDeleted = rarely();
+ String parentValue = parentValues[random().nextInt(parentValues.length)];
+ String parent = Integer.toString(parentDocId);
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ document.add(new StringField("field1", parentValue, Field.Store.NO));
+ if (markParentAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ int numChildDocs;
+ if (rarely()) {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
+ } else {
+ numChildDocs = random().nextInt(TEST_NIGHTLY ? 40 : 10);
+ }
+ if (parentDocId == numParentDocs - 1 && childIdToParentId.isEmpty()) {
+ // ensure there is at least one child in the index
+ numChildDocs = Math.max(1, numChildDocs);
+ }
+ for (int i = 0; i < numChildDocs; i++) {
+ String child = Integer.toString(childDocId++);
+ boolean markChildAsDeleted = rarely();
+ boolean filterMe = rarely();
+ document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", child), Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ if (markChildAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ if (filterMe) {
+ document.add(new StringField("filter", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ if (!markParentAsDeleted) {
+ NavigableMap<String, Float> childIdToScore;
+ if (parentValueToChildIds.containsKey(parentValue)) {
+ childIdToScore = parentValueToChildIds.lget();
+ } else {
+ parentValueToChildIds.put(parentValue, childIdToScore = new TreeMap<String, Float>());
+ }
+ if (!markChildAsDeleted && !filterMe) {
+ assertFalse("child ["+ child + "] already has a score", childIdToScore.containsKey(child));
+ childIdToScore.put(child, 1f);
+ childIdToParentId.put(Integer.valueOf(child), parentDocId);
+ }
+ }
+ }
+ }
+
+ // Delete docs that are marked to be deleted.
+ indexWriter.deleteDocuments(new Term("delete", "me"));
+ indexWriter.commit();
+
+ IndexReader indexReader = DirectoryReader.open(directory);
+ IndexSearcher searcher = new IndexSearcher(indexReader);
+ Engine.Searcher engineSearcher = new Engine.SimpleSearcher(
+ ParentQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+
+ TermFilter rawChildrenFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "child"));
+ Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
+ int max = numUniqueParentValues / 4;
+ for (int i = 0; i < max; i++) {
+ // Randomly pick a cached version: there is specific logic inside the parent/child queries that deals
+ // with the fact that deletes are applied at the top level when filters are cached.
+ Filter childrenFilter;
+ if (random().nextBoolean()) {
+ childrenFilter = SearchContext.current().filterCache().cache(rawChildrenFilter);
+ } else {
+ childrenFilter = rawChildrenFilter;
+ }
+
+ // Wrapping the query in a FilteredQuery exercises Scorer#advance(..) and also keeps
+ // Weight#scorer from receiving live docs as acceptedDocs.
+ Filter filterMe;
+ if (random().nextBoolean()) {
+ filterMe = SearchContext.current().filterCache().cache(rawFilterMe);
+ } else {
+ filterMe = rawFilterMe;
+ }
+
+ // Simulate a child update
+ if (random().nextBoolean()) {
+ int numberOfUpdates = 1 + random().nextInt(TEST_NIGHTLY ? 25 : 5);
+ int[] childIds = childIdToParentId.keys().toArray();
+ for (int j = 0; j < numberOfUpdates; j++) {
+ int childId = childIds[random().nextInt(childIds.length)];
+ String childUid = Uid.createUid("child", Integer.toString(childId));
+ indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, childUid));
+
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, childUid, Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ String parentUid = Uid.createUid("parent", Integer.toString(childIdToParentId.get(childId)));
+ document.add(new StringField(ParentFieldMapper.NAME, parentUid, Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+
+ indexReader.close();
+ indexReader = DirectoryReader.open(indexWriter.w, true);
+ searcher = new IndexSearcher(indexReader);
+ engineSearcher = new Engine.SimpleSearcher(
+ ParentQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+ }
+
+ String parentValue = parentValues[random().nextInt(numUniqueParentValues)];
+ Query parentQuery = new ConstantScoreQuery(new TermQuery(new Term("field1", parentValue)));
+ Query query = new ParentQuery(parentQuery, "parent", childrenFilter);
+ query = new XFilteredQuery(query, filterMe);
+ BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+ int numHits = 1 + random().nextInt(25);
+ TopScoreDocCollector actualTopDocsCollector = TopScoreDocCollector.create(numHits, false);
+ searcher.search(query, MultiCollector.wrap(collector, actualTopDocsCollector));
+ FixedBitSet actualResult = collector.getResult();
+
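+ // Brute-force expectation: every live child recorded for the matching parent
+ // value should end up in the result, scored through the mock scorer.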
+ FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
+ MockScorer mockScorer = new MockScorer(ScoreType.MAX); // just save one score per parent...
+ mockScorer.scores = new FloatArrayList();
+ TopScoreDocCollector expectedTopDocsCollector = TopScoreDocCollector.create(numHits, false);
+ expectedTopDocsCollector.setScorer(mockScorer);
+ if (parentValueToChildIds.containsKey(parentValue)) {
+ AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
+ Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
+ if (terms != null) {
+ NavigableMap<String, Float> childIdsAndScore = parentValueToChildIds.lget();
+ TermsEnum termsEnum = terms.iterator(null);
+ DocsEnum docsEnum = null;
+ for (Map.Entry<String, Float> entry : childIdsAndScore.entrySet()) {
+ TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("child", entry.getKey()));
+ if (seekStatus == TermsEnum.SeekStatus.FOUND) {
+ docsEnum = termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
+ expectedResult.set(docsEnum.nextDoc());
+ mockScorer.scores.add(entry.getValue());
+ expectedTopDocsCollector.collect(docsEnum.docID());
+ mockScorer.scores.clear();
+ } else if (seekStatus == TermsEnum.SeekStatus.END) {
+ break;
+ }
+ }
+ }
+ }
+
+ assertBitSet(actualResult, expectedResult, searcher);
+ assertTopDocs(actualTopDocsCollector.topDocs(), expectedTopDocsCollector.topDocs());
+ }
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/child/TestSearchContext.java b/src/test/java/org/elasticsearch/index/search/child/TestSearchContext.java
new file mode 100644
index 0000000..ccd3ac7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/child/TestSearchContext.java
@@ -0,0 +1,583 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Sort;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.docset.DocSetCache;
+import org.elasticsearch.index.cache.filter.FilterCache;
+import org.elasticsearch.index.cache.id.IdCache;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.ParsedFilter;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.SearchContextAggregations;
+import org.elasticsearch.search.dfs.DfsSearchResult;
+import org.elasticsearch.search.facet.SearchContextFacets;
+import org.elasticsearch.search.fetch.FetchSearchResult;
+import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext;
+import org.elasticsearch.search.fetch.partial.PartialFieldsContext;
+import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.search.highlight.SearchContextHighlight;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.search.rescore.RescoreSearchContext;
+import org.elasticsearch.search.scan.ScanContext;
+import org.elasticsearch.search.suggest.SuggestionSearchContext;
+
+import java.util.List;
+
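+/**
+ * Bare-bones SearchContext for these unit tests: only the searcher, recyclers,
+ * id cache, index service and filter cache are backed by real objects; every
+ * other method is a stub that returns a default value.
+ */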
+public class TestSearchContext extends SearchContext {
+
+ final CacheRecycler cacheRecycler;
+ final PageCacheRecycler pageCacheRecycler;
+ final IdCache idCache;
+ final IndexService indexService;
+ final FilterCache filterCache;
+
+ ContextIndexSearcher searcher;
+ int size;
+
+ public TestSearchContext(CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler, IdCache idCache, IndexService indexService, FilterCache filterCache) {
+ this.cacheRecycler = cacheRecycler;
+ this.pageCacheRecycler = pageCacheRecycler;
+ this.idCache = idCache;
+ this.indexService = indexService;
+ this.filterCache = filterCache;
+ }
+
+ public TestSearchContext() {
+ this.cacheRecycler = null;
+ this.pageCacheRecycler = null;
+ this.idCache = null;
+ this.indexService = null;
+ this.filterCache = null;
+ }
+
+ @Override
+ public boolean clearAndRelease() {
+ return false;
+ }
+
+ @Override
+ public void preProcess() {
+ }
+
+ @Override
+ public Filter searchFilter(String[] types) {
+ return null;
+ }
+
+ @Override
+ public long id() {
+ return 0;
+ }
+
+ @Override
+ public String source() {
+ return null;
+ }
+
+ @Override
+ public ShardSearchRequest request() {
+ return null;
+ }
+
+ @Override
+ public SearchType searchType() {
+ return null;
+ }
+
+ @Override
+ public SearchContext searchType(SearchType searchType) {
+ return null;
+ }
+
+ @Override
+ public SearchShardTarget shardTarget() {
+ return null;
+ }
+
+ @Override
+ public int numberOfShards() {
+ return 0;
+ }
+
+ @Override
+ public boolean hasTypes() {
+ return false;
+ }
+
+ @Override
+ public String[] types() {
+ return new String[0];
+ }
+
+ @Override
+ public float queryBoost() {
+ return 0;
+ }
+
+ @Override
+ public SearchContext queryBoost(float queryBoost) {
+ return null;
+ }
+
+ @Override
+ public long nowInMillis() {
+ return 0;
+ }
+
+ @Override
+ public Scroll scroll() {
+ return null;
+ }
+
+ @Override
+ public SearchContext scroll(Scroll scroll) {
+ return null;
+ }
+
+ @Override
+ public SearchContextFacets facets() {
+ return null;
+ }
+
+ @Override
+ public SearchContext facets(SearchContextFacets facets) {
+ return null;
+ }
+
+ @Override
+ public SearchContextAggregations aggregations() {
+ return null;
+ }
+
+ @Override
+ public SearchContext aggregations(SearchContextAggregations aggregations) {
+ return null;
+ }
+
+ @Override
+ public SearchContextHighlight highlight() {
+ return null;
+ }
+
+ @Override
+ public void highlight(SearchContextHighlight highlight) {
+ }
+
+ @Override
+ public SuggestionSearchContext suggest() {
+ return null;
+ }
+
+ @Override
+ public void suggest(SuggestionSearchContext suggest) {
+ }
+
+ @Override
+ public RescoreSearchContext rescore() {
+ return null;
+ }
+
+ @Override
+ public void rescore(RescoreSearchContext rescore) {
+ }
+
+ @Override
+ public boolean hasFieldDataFields() {
+ return false;
+ }
+
+ @Override
+ public FieldDataFieldsContext fieldDataFields() {
+ return null;
+ }
+
+ @Override
+ public boolean hasScriptFields() {
+ return false;
+ }
+
+ @Override
+ public ScriptFieldsContext scriptFields() {
+ return null;
+ }
+
+ @Override
+ public boolean hasPartialFields() {
+ return false;
+ }
+
+ @Override
+ public PartialFieldsContext partialFields() {
+ return null;
+ }
+
+ @Override
+ public boolean sourceRequested() {
+ return false;
+ }
+
+ @Override
+ public boolean hasFetchSourceContext() {
+ return false;
+ }
+
+ @Override
+ public FetchSourceContext fetchSourceContext() {
+ return null;
+ }
+
+ @Override
+ public SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext) {
+ return null;
+ }
+
+ @Override
+ public ContextIndexSearcher searcher() {
+ return searcher;
+ }
+
+ void setSearcher(ContextIndexSearcher searcher) {
+ this.searcher = searcher;
+ }
+
+ @Override
+ public IndexShard indexShard() {
+ return null;
+ }
+
+ @Override
+ public MapperService mapperService() {
+ return indexService.mapperService();
+ }
+
+ @Override
+ public AnalysisService analysisService() {
+ return indexService.analysisService();
+ }
+
+ @Override
+ public IndexQueryParserService queryParserService() {
+ return null;
+ }
+
+ @Override
+ public SimilarityService similarityService() {
+ return null;
+ }
+
+ @Override
+ public ScriptService scriptService() {
+ return null;
+ }
+
+ @Override
+ public CacheRecycler cacheRecycler() {
+ return cacheRecycler;
+ }
+
+ @Override
+ public PageCacheRecycler pageCacheRecycler() {
+ return pageCacheRecycler;
+ }
+
+ @Override
+ public FilterCache filterCache() {
+ return filterCache;
+ }
+
+ @Override
+ public DocSetCache docSetCache() {
+ return null;
+ }
+
+ @Override
+ public IndexFieldDataService fieldData() {
+ return null;
+ }
+
+ @Override
+ public IdCache idCache() {
+ return idCache;
+ }
+
+ @Override
+ public long timeoutInMillis() {
+ return 0;
+ }
+
+ @Override
+ public void timeoutInMillis(long timeoutInMillis) {
+ }
+
+ @Override
+ public SearchContext minimumScore(float minimumScore) {
+ return null;
+ }
+
+ @Override
+ public Float minimumScore() {
+ return null;
+ }
+
+ @Override
+ public SearchContext sort(Sort sort) {
+ return null;
+ }
+
+ @Override
+ public Sort sort() {
+ return null;
+ }
+
+ @Override
+ public SearchContext trackScores(boolean trackScores) {
+ return null;
+ }
+
+ @Override
+ public boolean trackScores() {
+ return false;
+ }
+
+ @Override
+ public SearchContext parsedPostFilter(ParsedFilter postFilter) {
+ return null;
+ }
+
+ @Override
+ public ParsedFilter parsedPostFilter() {
+ return null;
+ }
+
+ @Override
+ public Filter aliasFilter() {
+ return null;
+ }
+
+ @Override
+ public SearchContext parsedQuery(ParsedQuery query) {
+ return null;
+ }
+
+ @Override
+ public ParsedQuery parsedQuery() {
+ return null;
+ }
+
+ @Override
+ public Query query() {
+ return null;
+ }
+
+ @Override
+ public boolean queryRewritten() {
+ return false;
+ }
+
+ @Override
+ public SearchContext updateRewriteQuery(Query rewriteQuery) {
+ return null;
+ }
+
+ @Override
+ public int from() {
+ return 0;
+ }
+
+ @Override
+ public SearchContext from(int from) {
+ return null;
+ }
+
+ @Override
+ public int size() {
+ return size;
+ }
+
+ public void setSize(int size) {
+ this.size = size;
+ }
+
+ @Override
+ public SearchContext size(int size) {
+ return null;
+ }
+
+ @Override
+ public boolean hasFieldNames() {
+ return false;
+ }
+
+ @Override
+ public List<String> fieldNames() {
+ return null;
+ }
+
+ @Override
+ public void emptyFieldNames() {
+ }
+
+ @Override
+ public boolean explain() {
+ return false;
+ }
+
+ @Override
+ public void explain(boolean explain) {
+ }
+
+ @Override
+ public List<String> groupStats() {
+ return null;
+ }
+
+ @Override
+ public void groupStats(List<String> groupStats) {
+ }
+
+ @Override
+ public boolean version() {
+ return false;
+ }
+
+ @Override
+ public void version(boolean version) {
+ }
+
+ @Override
+ public int[] docIdsToLoad() {
+ return new int[0];
+ }
+
+ @Override
+ public int docIdsToLoadFrom() {
+ return 0;
+ }
+
+ @Override
+ public int docIdsToLoadSize() {
+ return 0;
+ }
+
+ @Override
+ public SearchContext docIdsToLoad(int[] docIdsToLoad, int docIdsToLoadFrom, int docIdsToLoadSize) {
+ return null;
+ }
+
+ @Override
+ public void accessed(long accessTime) {
+ }
+
+ @Override
+ public long lastAccessTime() {
+ return 0;
+ }
+
+ @Override
+ public long keepAlive() {
+ return 0;
+ }
+
+ @Override
+ public void keepAlive(long keepAlive) {
+ }
+
+ @Override
+ public SearchLookup lookup() {
+ return null;
+ }
+
+ @Override
+ public DfsSearchResult dfsResult() {
+ return null;
+ }
+
+ @Override
+ public QuerySearchResult queryResult() {
+ return null;
+ }
+
+ @Override
+ public FetchSearchResult fetchResult() {
+ return null;
+ }
+
+ @Override
+ public void addReleasable(Releasable releasable) {
+ }
+
+ @Override
+ public void clearReleasables() {
+ }
+
+ @Override
+ public ScanContext scanContext() {
+ return null;
+ }
+
+ @Override
+ public MapperService.SmartNameFieldMappers smartFieldMappers(String name) {
+ return null;
+ }
+
+ @Override
+ public FieldMappers smartNameFieldMappers(String name) {
+ return null;
+ }
+
+ @Override
+ public FieldMapper smartNameFieldMapper(String name) {
+ return null;
+ }
+
+ @Override
+ public MapperService.SmartNameObjectMapper smartNameObjectMapper(String name) {
+ return null;
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ return false;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/search/geo/GeoDistanceTests.java b/src/test/java/org/elasticsearch/index/search/geo/GeoDistanceTests.java
new file mode 100644
index 0000000..a20ee88
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/geo/GeoDistanceTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for {@link GeoDistance} calculations and bounding checks.
+ */
+public class GeoDistanceTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDistanceCheck() {
+ // Note: isWithin is an approximation, so even though (0.52, 0.52) is outside the 50mi bound it still returns "true"
+ GeoDistance.DistanceBoundingCheck check = GeoDistance.distanceBoundingCheck(0, 0, 50, DistanceUnit.MILES);
+ assertThat(check.isWithin(0.5, 0.5), equalTo(true));
+ assertThat(check.isWithin(0.52, 0.52), equalTo(true));
+ assertThat(check.isWithin(1, 1), equalTo(false));
+
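+ // Dateline crossing: a 200mi bound around (0, 179) wraps into negative longitudes.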
+ check = GeoDistance.distanceBoundingCheck(0, 179, 200, DistanceUnit.MILES);
+ assertThat(check.isWithin(0, -179), equalTo(true));
+ assertThat(check.isWithin(0, -178), equalTo(false));
+ }
+
+ @Test
+ public void testArcDistanceVsPlaneInEllipsis() {
+ GeoPoint centre = new GeoPoint(48.8534100, 2.3488000);
+ GeoPoint northernPoint = new GeoPoint(48.8801108681, 2.35152032666);
+ GeoPoint westernPoint = new GeoPoint(48.85265, 2.308896);
+
+ // With GeoDistance.ARC both the northern and western points are within the 4km range
+ assertThat(GeoDistance.ARC.calculate(centre.lat(), centre.lon(), northernPoint.lat(),
+ northernPoint.lon(), DistanceUnit.KILOMETERS), lessThan(4D));
+ assertThat(GeoDistance.ARC.calculate(centre.lat(), centre.lon(), westernPoint.lat(),
+ westernPoint.lon(), DistanceUnit.KILOMETERS), lessThan(4D));
+
+ // With GeoDistance.PLANE, only the northern point is within the 4km range,
+ // the western point is outside of the range due to the simple math it employs,
+ // meaning results will appear elliptical
+ assertThat(GeoDistance.PLANE.calculate(centre.lat(), centre.lon(), northernPoint.lat(),
+ northernPoint.lon(), DistanceUnit.KILOMETERS), lessThan(4D));
+ assertThat(GeoDistance.PLANE.calculate(centre.lat(), centre.lon(), westernPoint.lat(),
+ westernPoint.lon(), DistanceUnit.KILOMETERS), greaterThan(4D));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/search/geo/GeoHashUtilsTests.java b/src/test/java/org/elasticsearch/index/search/geo/GeoHashUtilsTests.java
new file mode 100644
index 0000000..7e872e1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/geo/GeoHashUtilsTests.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests encoding and decoding of geohashes via {@link GeoHashUtils}.
+ */
+public class GeoHashUtilsTests extends ElasticsearchTestCase {
+
+ /**
+ * Pass condition: lat=42.6, lng=-5.6 should be encoded as "ezs42e44yx96",
+ * lat=57.64911 lng=10.40744 should be encoded as "u4pruydqqvj8"
+ */
+ @Test
+ public void testEncode() {
+ String hash = GeoHashUtils.encode(42.6, -5.6);
+ assertEquals("ezs42e44yx96", hash);
+
+ hash = GeoHashUtils.encode(57.64911, 10.40744);
+ assertEquals("u4pruydqqvj8", hash);
+ }
+
+ /**
+ * Pass condition: lat=52.3738007, lng=4.8909347 should be encoded and then
+ * decoded within 0.00001 of the original value
+ */
+ @Test
+ public void testDecodePreciseLongitudeLatitude() {
+ String hash = GeoHashUtils.encode(52.3738007, 4.8909347);
+
+ GeoPoint point = GeoHashUtils.decode(hash);
+
+ assertEquals(52.3738007, point.lat(), 0.00001D);
+ assertEquals(4.8909347, point.lon(), 0.00001D);
+ }
+
+ /**
+ * Pass condition: lat=84.6, lng=10.5 should be encoded and then decoded
+ * within 0.00001 of the original value
+ */
+ @Test
+ public void testDecodeImpreciseLongitudeLatitude() {
+ String hash = GeoHashUtils.encode(84.6, 10.5);
+
+ GeoPoint point = GeoHashUtils.decode(hash);
+
+ assertEquals(84.6, point.lat(), 0.00001D);
+ assertEquals(10.5, point.lon(), 0.00001D);
+ }
+
+ /*
+ * see https://issues.apache.org/jira/browse/LUCENE-1815 for details
+ */
+ @Test
+ public void testDecodeEncode() {
+ String geoHash = "u173zq37x014";
+ assertEquals(geoHash, GeoHashUtils.encode(52.3738007, 4.8909347));
+ GeoPoint decode = GeoHashUtils.decode(geoHash);
+ assertEquals(52.37380061d, decode.lat(), 0.000001d);
+ assertEquals(4.8909343d, decode.lon(), 0.000001d);
+
+ assertEquals(geoHash, GeoHashUtils.encode(decode.lat(), decode.lon()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java
new file mode 100644
index 0000000..6ac7198
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.distance.DistanceUtils;
+import org.apache.lucene.spatial.prefix.tree.Cell;
+import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
+import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for coordinate normalization in {@link GeoUtils}.
+ */
+public class GeoUtilsTests extends ElasticsearchTestCase {
+
+ /**
+ * Test special values like inf, NaN and -0.0.
+ */
+ @Test
+ public void testSpecials() {
+ assertThat(GeoUtils.normalizeLon(Double.POSITIVE_INFINITY), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLat(Double.POSITIVE_INFINITY), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLon(Double.NEGATIVE_INFINITY), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLat(Double.NEGATIVE_INFINITY), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLon(Double.NaN), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLat(Double.NaN), equalTo(Double.NaN));
+ assertThat(0.0, not(equalTo(-0.0)));
+ assertThat(GeoUtils.normalizeLon(-0.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(-0.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLon(0.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(0.0), equalTo(0.0));
+ }
+
+ /**
+ * Test bounding values.
+ */
+ @Test
+ public void testBounds() {
+ assertThat(GeoUtils.normalizeLon(-360.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(-180.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLon(360.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(180.0), equalTo(0.0));
+ // and halves
+ assertThat(GeoUtils.normalizeLon(-180.0), equalTo(180.0));
+ assertThat(GeoUtils.normalizeLat(-90.0), equalTo(-90.0));
+ assertThat(GeoUtils.normalizeLon(180.0), equalTo(180.0));
+ assertThat(GeoUtils.normalizeLat(90.0), equalTo(90.0));
+ }
+
+ /**
+ * Test normal values.
+ */
+ @Test
+ public void testNormal() {
+ // Near bounds
+ assertThat(GeoUtils.normalizeLon(-360.5), equalTo(-0.5));
+ assertThat(GeoUtils.normalizeLat(-180.5), equalTo(0.5));
+ assertThat(GeoUtils.normalizeLon(360.5), equalTo(0.5));
+ assertThat(GeoUtils.normalizeLat(180.5), equalTo(-0.5));
+ // and near halves
+ assertThat(GeoUtils.normalizeLon(-180.5), equalTo(179.5));
+ assertThat(GeoUtils.normalizeLat(-90.5), equalTo(-89.5));
+ assertThat(GeoUtils.normalizeLon(180.5), equalTo(-179.5));
+ assertThat(GeoUtils.normalizeLat(90.5), equalTo(89.5));
+ // Now with points, to check for longitude shifting with latitude normalization
+ // We've gone past the north pole and down the other side, the longitude will
+ // be shifted by 180
+ assertNormalizedPoint(new GeoPoint(90.5, 10), new GeoPoint(89.5, -170));
+
+ // Every 10 degrees, across multiple full turns in both directions
+ for (int shift = -20; shift <= 20; ++shift) {
+ for (int lon = 0; lon <= 360; lon += 10) {
+ // Longitudes in [0, 180] stay as-is; beyond 180 they wrap around to the west
+ double expectedLon = lon <= 180 ? lon : lon - 360.0;
+ assertThat(GeoUtils.normalizeLon(shift * 360.0 + lon), equalTo(expectedLon));
+ }
+ }
+ for (int shift = -20; shift <= 20; ++shift) {
+ for (int lat = 0; lat <= 360; lat += 10) {
+ // Latitude folds at the poles: up to 90, back down through 0 to -90, then up to 0
+ double expectedLat = lat <= 90 ? lat : (lat <= 270 ? 180.0 - lat : lat - 360.0);
+ assertThat(GeoUtils.normalizeLat(shift * 360.0 + lat), equalTo(expectedLat));
+ }
+ }
+ }
+
+ /**
+ * Test huge values.
+ */
+ @Test
+ public void testHuge() {
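+ // Normalization should be periodic: offsetting by 100 billion full turns must give
+ // the same result as normalizing the small angle alone.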
+ assertThat(GeoUtils.normalizeLon(-36000000000181.0), equalTo(GeoUtils.normalizeLon(-181.0)));
+ assertThat(GeoUtils.normalizeLon(-36000000000180.0), equalTo(GeoUtils.normalizeLon(-180.0)));
+ assertThat(GeoUtils.normalizeLon(-36000000000179.0), equalTo(GeoUtils.normalizeLon(-179.0)));
+ assertThat(GeoUtils.normalizeLon(-36000000000178.0), equalTo(GeoUtils.normalizeLon(-178.0)));
+ assertThat(GeoUtils.normalizeLon(-36000000000001.0), equalTo(GeoUtils.normalizeLon(-001.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000000.0), equalTo(GeoUtils.normalizeLon(+000.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000001.0), equalTo(GeoUtils.normalizeLon(+001.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000002.0), equalTo(GeoUtils.normalizeLon(+002.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000178.0), equalTo(GeoUtils.normalizeLon(+178.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000179.0), equalTo(GeoUtils.normalizeLon(+179.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000180.0), equalTo(GeoUtils.normalizeLon(+180.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000181.0), equalTo(GeoUtils.normalizeLon(+181.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000091.0), equalTo(GeoUtils.normalizeLat(-091.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000090.0), equalTo(GeoUtils.normalizeLat(-090.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000089.0), equalTo(GeoUtils.normalizeLat(-089.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000088.0), equalTo(GeoUtils.normalizeLat(-088.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000001.0), equalTo(GeoUtils.normalizeLat(-001.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000000.0), equalTo(GeoUtils.normalizeLat(+000.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000001.0), equalTo(GeoUtils.normalizeLat(+001.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000002.0), equalTo(GeoUtils.normalizeLat(+002.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000088.0), equalTo(GeoUtils.normalizeLat(+088.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000089.0), equalTo(GeoUtils.normalizeLat(+089.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000090.0), equalTo(GeoUtils.normalizeLat(+090.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000091.0), equalTo(GeoUtils.normalizeLat(+091.0)));
+ }
+
+ @Test
+ public void testPrefixTreeCellSizes() {
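+ // Verifies that the cell size helpers in GeoUtils agree, level by level, with the
+ // actual cell dimensions of Lucene's geohash and quad prefix trees.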
+ assertThat(GeoUtils.EARTH_SEMI_MAJOR_AXIS, equalTo(DistanceUtils.EARTH_EQUATORIAL_RADIUS_KM * 1000));
+ assertThat(GeoUtils.quadTreeCellWidth(0), lessThanOrEqualTo(GeoUtils.EARTH_EQUATOR));
+
+ SpatialContext spatialContext = new SpatialContext(true);
+
+ GeohashPrefixTree geohashPrefixTree = new GeohashPrefixTree(spatialContext, GeohashPrefixTree.getMaxLevelsPossible()/2);
+ Cell gNode = geohashPrefixTree.getWorldCell();
+
+ for(int i = 0; i<geohashPrefixTree.getMaxLevels(); i++) {
+ double width = GeoUtils.geoHashCellWidth(i);
+ double height = GeoUtils.geoHashCellHeight(i);
+ double size = GeoUtils.geoHashCellSize(i);
+ double degrees = 360.0 * width / GeoUtils.EARTH_EQUATOR;
+ int level = GeoUtils.quadTreeLevelsForPrecision(size);
+
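+ // quadTreeLevelsForPrecision picks a level fine enough for the requested size, so
+ // its cells should be no wider or taller than the geohash cell at this level.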
+ assertThat(GeoUtils.quadTreeCellWidth(level), lessThanOrEqualTo(width));
+ assertThat(GeoUtils.quadTreeCellHeight(level), lessThanOrEqualTo(height));
+ assertThat(GeoUtils.geoHashLevelsForPrecision(size), equalTo(geohashPrefixTree.getLevelForDistance(degrees)));
+
+ assertThat("width at level "+i, gNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
+ assertThat("height at level "+i, gNode.getShape().getBoundingBox().getHeight(), equalTo(180.d * height / GeoUtils.EARTH_POLAR_DISTANCE));
+
+ gNode = gNode.getSubCells(null).iterator().next();
+ }
+
+ QuadPrefixTree quadPrefixTree = new QuadPrefixTree(spatialContext);
+ Cell qNode = quadPrefixTree.getWorldCell();
+ for (int i = 0; i < QuadPrefixTree.DEFAULT_MAX_LEVELS; i++) {
+
+ double degrees = 360.0/(1L<<i);
+ double width = GeoUtils.quadTreeCellWidth(i);
+ double height = GeoUtils.quadTreeCellHeight(i);
+ double size = GeoUtils.quadTreeCellSize(i);
+ int level = GeoUtils.quadTreeLevelsForPrecision(size);
+
+ assertThat(GeoUtils.quadTreeCellWidth(level), lessThanOrEqualTo(width));
+ assertThat(GeoUtils.quadTreeCellHeight(level), lessThanOrEqualTo(height));
+ assertThat(GeoUtils.quadTreeLevelsForPrecision(size), equalTo(quadPrefixTree.getLevelForDistance(degrees)));
+
+ assertThat("width at level "+i, qNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
+ assertThat("height at level "+i, qNode.getShape().getBoundingBox().getHeight(), equalTo(180.d * height / GeoUtils.EARTH_POLAR_DISTANCE));
+
+ qNode = qNode.getSubCells(null).iterator().next();
+ }
+ }
+
+ private static void assertNormalizedPoint(GeoPoint input, GeoPoint expected) {
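+ // GeoUtils.normalizePoint normalizes the given point in place, so the input itself
+ // is compared against the expected point afterwards.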
+ GeoUtils.normalizePoint(input);
+ assertThat(input, equalTo(expected));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java
new file mode 100644
index 0000000..65bb9fe
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java
@@ -0,0 +1,347 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.join.FixedBitSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.fielddata.AbstractFieldDataTests;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Base class for nested sorting tests on numeric fields. Subclasses supply the concrete
+ * field type and the matching {@link IndexFieldData.XFieldComparatorSource}.
+ */
+public abstract class AbstractNumberNestedSortingTests extends AbstractFieldDataTests {
+
+ @Test
+ public void testNestedSorting() throws Exception {
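+ // Each block below is one parent document preceded by its nested (child) documents;
+ // Lucene's block join requires the parent to come last in its block.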
+ List<Document> docs = new ArrayList<Document>();
+ Document document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 1, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 2, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 2, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 1, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 3, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 4, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 4, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 5, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 5, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 6, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 6, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ // This doc will not be included because it has no nested docs
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 7, Field.Store.NO));
+ writer.addDocument(document);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 7, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 8, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ // Some garbage docs, just to check whether the NestedFieldComparator can deal with them.
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+
+ SortMode sortMode = SortMode.SUM;
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false));
+ IndexFieldData.XFieldComparatorSource innerFieldComparator = createInnerFieldComparator("field2", sortMode, null);
+ Filter parentFilter = new TermFilter(new Term("__type", "parent"));
+ Filter childFilter = new NotFilter(parentFilter);
+ NestedFieldComparatorSource nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new XFilteredQuery(new MatchAllDocsQuery(), childFilter), new FixedBitSetCachingWrapperFilter(parentFilter), ScoreMode.None);
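+ // The block join query matches the child docs and joins them to their parents; the
+ // nested comparator then sorts each parent by the SUM of its children's field2 values.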
+
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopFieldDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(10));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(11));
+
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(28));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(13));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(23));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(12));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(11));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(10));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9));
+
+ childFilter = new TermFilter(new Term("filter_1", "T"));
+ nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ query = new ToParentBlockJoinQuery(
+ new XFilteredQuery(new MatchAllDocsQuery(), childFilter),
+ new FixedBitSetCachingWrapperFilter(parentFilter),
+ ScoreMode.None
+ );
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(6));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(23));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(12));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+
+ sort = new Sort(new SortField("field2", nestedComparatorSource));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(6));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(28));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9));
+
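+ // With an explicit missing value (127 here, -127 below), parents without any child
+ // matching the filter sort by that fallback value, pushing them to the extremes.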
+ innerFieldComparator = createInnerFieldComparator("field2", sortMode, 127);
+ nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort);
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(127));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(24));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(127));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(23));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(12));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(8));
+
+ innerFieldComparator = createInnerFieldComparator("field2", sortMode, -127);
+ nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ sort = new Sort(new SortField("field2", nestedComparatorSource));
+ topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort);
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-127));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(24));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(-127));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(28));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(7));
+
+ // Extracted into a method because floating point based XFieldComparatorSource implementations
+ // produce a different outcome for SortMode.AVG than integral number based ones.
+ assertAvgScoreMode(parentFilter, searcher, innerFieldComparator);
+ searcher.getIndexReader().close();
+ }
+
+ protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException {
+ SortMode sortMode = SortMode.AVG;
+ Filter childFilter = new NotFilter(parentFilter);
+ NestedFieldComparatorSource nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ Query query = new ToParentBlockJoinQuery(new XFilteredQuery(new MatchAllDocsQuery(), childFilter), new FixedBitSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ }
+
+ protected abstract IndexableField createField(String name, int value, Field.Store store);
+
+ protected abstract IndexFieldData.XFieldComparatorSource createInnerFieldComparator(String fieldName, SortMode sortMode, Object missingValue);
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java
new file mode 100644
index 0000000..d34dd24
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.join.FixedBitSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.fielddata.plain.DoubleArrayIndexFieldData;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Nested sorting tests for double valued fields.
+ */
+public class DoubleNestedSortingTests extends AbstractNumberNestedSortingTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("double");
+ }
+
+ @Override
+ protected IndexFieldData.XFieldComparatorSource createInnerFieldComparator(String fieldName, SortMode sortMode, Object missingValue) {
+ DoubleArrayIndexFieldData fieldData = getForField(fieldName);
+ return new DoubleValuesComparatorSource(fieldData, missingValue, sortMode);
+ }
+
+ @Override
+ protected IndexableField createField(String name, int value, Field.Store store) {
+ return new DoubleField(name, value, store);
+ }
+
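+ // Overridden because averaging double values keeps fractional parts, so ties between
+ // parents break differently than in the integral implementations of the base class.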
+ @Override
+ protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException {
+ SortMode sortMode = SortMode.AVG;
+ Filter childFilter = new NotFilter(parentFilter);
+ NestedFieldComparatorSource nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ Query query = new ToParentBlockJoinQuery(new XFilteredQuery(new MatchAllDocsQuery(), childFilter), new FixedBitSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java
new file mode 100644
index 0000000..ec15feb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.join.FixedBitSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.FloatValuesComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.fielddata.plain.FloatArrayIndexFieldData;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Nested sorting tests for float valued fields.
+ */
+public class FloatNestedSortingTests extends AbstractNumberNestedSortingTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("float");
+ }
+
+ @Override
+ protected IndexFieldData.XFieldComparatorSource createInnerFieldComparator(String fieldName, SortMode sortMode, Object missingValue) {
+ FloatArrayIndexFieldData fieldData = getForField(fieldName);
+ return new FloatValuesComparatorSource(fieldData, missingValue, sortMode);
+ }
+
+ @Override
+ protected IndexableField createField(String name, int value, Field.Store store) {
+ return new FloatField(name, value, store);
+ }
+
+ @Override
+ protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException {
+ SortMode sortMode = SortMode.AVG;
+ Filter childFilter = new NotFilter(parentFilter);
+ NestedFieldComparatorSource nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerFieldComparator, parentFilter, childFilter);
+ Query query = new ToParentBlockJoinQuery(new XFilteredQuery(new MatchAllDocsQuery(), childFilter), new FixedBitSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java
new file mode 100644
index 0000000..33ab1d0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.fieldcomparator.LongValuesComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.fielddata.plain.PackedArrayIndexFieldData;
+
+/**
+ * Nested sorting tests for long valued fields.
+ */
+public class LongNestedSortingTests extends AbstractNumberNestedSortingTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("long");
+ }
+
+ @Override
+ protected IndexFieldData.XFieldComparatorSource createInnerFieldComparator(String fieldName, SortMode sortMode, Object missingValue) {
+ PackedArrayIndexFieldData fieldData = getForField(fieldName);
+ return new LongValuesComparatorSource(fieldData, missingValue, sortMode);
+ }
+
+ @Override
+ protected IndexableField createField(String name, int value, Field.Store store) {
+ return new LongField(name, value, store);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java
new file mode 100644
index 0000000..c120ca9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java
@@ -0,0 +1,281 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.join.FixedBitSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.lucene.search.AndFilter;
+import org.elasticsearch.common.lucene.search.NotFilter;
+import org.elasticsearch.common.lucene.search.XFilteredQuery;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.fielddata.AbstractFieldDataTests;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
+import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Nested sorting tests on string fields backed by paged_bytes field data.
+ */
+public class NestedSortingTests extends AbstractFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("string", ImmutableSettings.builder().put("format", "paged_bytes"));
+ }
+
+ @Test
+ public void testNestedSorting() throws Exception {
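+ // Same block layout as the numeric nested sorting tests: three children followed by
+ // their parent document.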
+ List<Document> docs = new ArrayList<Document>();
+ Document document = new Document();
+ document.add(new StringField("field2", "a", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "b", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "c", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "a", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "c", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "d", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "e", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "b", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "e", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "f", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "g", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "c", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "g", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "h", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "i", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "d", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "i", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "j", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "k", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "f", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "k", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "l", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "m", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "g", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ // This doc will not be included because it has no nested docs
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "h", Field.Store.NO));
+ writer.addDocument(document);
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "m", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "n", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "o", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "i", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ // Some garbage docs, just to check whether the NestedFieldComparator can deal with them.
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+
+ SortMode sortMode = SortMode.MIN;
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false));
+ PagedBytesIndexFieldData indexFieldData = getForField("field2");
+ BytesRefFieldComparatorSource innerSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode);
+ Filter parentFilter = new TermFilter(new Term("__type", "parent"));
+ Filter childFilter = new NotFilter(parentFilter);
+ NestedFieldComparatorSource nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerSource, parentFilter, childFilter);
+ ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new XFilteredQuery(new MatchAllDocsQuery(), childFilter), new FixedBitSetCachingWrapperFilter(parentFilter), ScoreMode.None);
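+ // MIN sort mode: each matching parent sorts by the lexicographically smallest field2
+ // value among its children.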
+
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopFieldDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(3));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("a"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("c"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(11));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("e"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("g"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("i"));
+
+ sortMode = SortMode.MAX;
+ nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerSource, parentFilter, childFilter);
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(28));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("o"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(23));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("m"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(19));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("k"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("i"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(11));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("g"));
+
+
+ childFilter = new AndFilter(Arrays.asList(new NotFilter(parentFilter), new TermFilter(new Term("filter_1", "T"))));
+ nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerSource, parentFilter, childFilter);
+ query = new ToParentBlockJoinQuery(
+ new XFilteredQuery(new MatchAllDocsQuery(), childFilter),
+ new FixedBitSetCachingWrapperFilter(parentFilter),
+ ScoreMode.None
+ );
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(6));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(23));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("m"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(28));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("m"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(11));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("g"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("g"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(7));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("e"));
+
+ searcher.getIndexReader().close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java b/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java
new file mode 100644
index 0000000..8bf8586
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import org.apache.lucene.search.similarities.*;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.codec.CodecModule;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperServiceModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.instanceOf;
+
+public class SimilarityTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testResolveDefaultSimilarities() {
+ SimilarityLookupService similarityLookupService = similarityService().similarityLookupService();
+ assertThat(similarityLookupService.similarity("default"), instanceOf(PreBuiltSimilarityProvider.class));
+ assertThat(similarityLookupService.similarity("default").get(), instanceOf(DefaultSimilarity.class));
+ assertThat(similarityLookupService.similarity("BM25"), instanceOf(PreBuiltSimilarityProvider.class));
+ assertThat(similarityLookupService.similarity("BM25").get(), instanceOf(BM25Similarity.class));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_default() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "default")
+ .put("index.similarity.my_similarity.discount_overlaps", false)
+ .build();
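+ // discount_overlaps=false counts overlap tokens (position increment 0) in length
+ // normalization; the assertions below verify the setting reaches the Similarity.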
+ SimilarityService similarityService = similarityService(indexSettings);
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().similarity(), instanceOf(DefaultSimilarityProvider.class));
+
+ DefaultSimilarity similarity = (DefaultSimilarity) documentMapper.mappers().name("field1").mapper().similarity().get();
+ assertThat(similarity.getDiscountOverlaps(), equalTo(false));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_bm25() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "BM25")
+ .put("index.similarity.my_similarity.k1", 2.0f)
+ .put("index.similarity.my_similarity.b", 1.5f)
+ .put("index.similarity.my_similarity.discount_overlaps", false)
+ .build();
+ SimilarityService similarityService = similarityService(indexSettings);
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().similarity(), instanceOf(BM25SimilarityProvider.class));
+
+ BM25Similarity similarity = (BM25Similarity) documentMapper.mappers().name("field1").mapper().similarity().get();
+ assertThat(similarity.getK1(), equalTo(2.0f));
+ assertThat(similarity.getB(), equalTo(1.5f));
+ assertThat(similarity.getDiscountOverlaps(), equalTo(false));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_DFR() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "DFR")
+ .put("index.similarity.my_similarity.basic_model", "g")
+ .put("index.similarity.my_similarity.after_effect", "l")
+ .put("index.similarity.my_similarity.normalization", "h2")
+ .put("index.similarity.my_similarity.normalization.h2.c", 3f)
+ .build();
+ SimilarityService similarityService = similarityService(indexSettings);
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().similarity(), instanceOf(DFRSimilarityProvider.class));
+
+ DFRSimilarity similarity = (DFRSimilarity) documentMapper.mappers().name("field1").mapper().similarity().get();
+ assertThat(similarity.getBasicModel(), instanceOf(BasicModelG.class));
+ assertThat(similarity.getAfterEffect(), instanceOf(AfterEffectL.class));
+ assertThat(similarity.getNormalization(), instanceOf(NormalizationH2.class));
+ assertThat(((NormalizationH2) similarity.getNormalization()).getC(), equalTo(3f));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_IB() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "IB")
+ .put("index.similarity.my_similarity.distribution", "spl")
+ .put("index.similarity.my_similarity.lambda", "ttf")
+ .put("index.similarity.my_similarity.normalization", "h2")
+ .put("index.similarity.my_similarity.normalization.h2.c", 3f)
+ .build();
+ SimilarityService similarityService = similarityService(indexSettings);
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().name("field1").mapper().similarity(), instanceOf(IBSimilarityProvider.class));
+
+ IBSimilarity similarity = (IBSimilarity) documentMapper.mappers().name("field1").mapper().similarity().get();
+ assertThat(similarity.getDistribution(), instanceOf(DistributionSPL.class));
+ assertThat(similarity.getLambda(), instanceOf(LambdaTTF.class));
+ assertThat(similarity.getNormalization(), instanceOf(NormalizationH2.class));
+ assertThat(((NormalizationH2) similarity.getNormalization()).getC(), equalTo(3f));
+ }
+
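+ // Builds a minimal injector containing only the modules required to resolve
+ // similarities from index settings and mappings.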
+ private static SimilarityService similarityService() {
+ return similarityService(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ private static SimilarityService similarityService(Settings settings) {
+ Index index = new Index("test");
+ Injector injector = new ModulesBuilder()
+ .add(new SettingsModule(settings))
+ .add(new IndexNameModule(index))
+ .add(new IndexSettingsModule(index, settings))
+ .add(new CodecModule(settings))
+ .add(new MapperServiceModule())
+ .add(new AnalysisModule(settings))
+ .add(new SimilarityModule(settings))
+ .add(new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(CircuitBreakerService.class).to(DummyCircuitBreakerService.class);
+ }
+ })
+ .createInjector();
+ return injector.getInstance(SimilarityService.class);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/store/distributor/DistributorTests.java b/src/test/java/org/elasticsearch/index/store/distributor/DistributorTests.java
new file mode 100644
index 0000000..528f052
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/store/distributor/DistributorTests.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.distributor;
+
+import org.apache.lucene.store.*;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for the {@link LeastUsedDistributor} and {@link RandomWeightedDistributor},
+ * driven by fake directories whose usable space can be adjusted at will.
+ */
+public class DistributorTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testLeastUsedDistributor() throws Exception {
+ FakeFsDirectory[] directories = new FakeFsDirectory[]{
+ new FakeFsDirectory("dir0", 10L),
+ new FakeFsDirectory("dir1", 20L),
+ new FakeFsDirectory("dir2", 30L)
+ };
+ FakeDirectoryService directoryService = new FakeDirectoryService(directories);
+
+ LeastUsedDistributor distributor = new LeastUsedDistributor(directoryService);
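+ // dir2 has the most usable space (30 bytes), so it should always be chosen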
+ for (int i = 0; i < 5; i++) {
+ assertThat(distributor.any(), equalTo((Directory) directories[2]));
+ }
+
+ directories[2].setUsableSpace(5L);
+ for (int i = 0; i < 5; i++) {
+ assertThat(distributor.any(), equalTo((Directory) directories[1]));
+ }
+
+ directories[1].setUsableSpace(0L);
+ for (int i = 0; i < 5; i++) {
+ assertThat(distributor.any(), equalTo((Directory) directories[0]));
+ }
+
+ directories[0].setUsableSpace(10L);
+ directories[1].setUsableSpace(20L);
+ directories[2].setUsableSpace(20L);
+ for (FakeFsDirectory directory : directories) {
+ directory.resetAllocationCount();
+ }
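+ // dir1 and dir2 are now tied for the most usable space: allocations should be
+ // split roughly evenly between them, and dir0 should never be chosen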
+ for (int i = 0; i < 10000; i++) {
+ ((FakeFsDirectory) distributor.any()).incrementAllocationCount();
+ }
+ assertThat(directories[0].getAllocationCount(), equalTo(0));
+ assertThat((double) directories[1].getAllocationCount() / directories[2].getAllocationCount(), closeTo(1.0, 0.5));
+
+ // Test failover scenario
+ for (FakeFsDirectory directory : directories) {
+ directory.resetAllocationCount();
+ }
+ directories[0].setUsableSpace(0L);
+ directories[1].setUsableSpace(0L);
+ directories[2].setUsableSpace(0L);
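+ // with no usable space anywhere, the distributor falls back to spreading
+ // allocations roughly uniformly across all directories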
+ for (int i = 0; i < 10000; i++) {
+ ((FakeFsDirectory) distributor.any()).incrementAllocationCount();
+ }
+ for (FakeFsDirectory directory : directories) {
+ assertThat(directory.getAllocationCount(), greaterThan(0));
+ }
+ assertThat((double) directories[0].getAllocationCount() / directories[2].getAllocationCount(), closeTo(1.0, 0.5));
+ assertThat((double) directories[1].getAllocationCount() / directories[2].getAllocationCount(), closeTo(1.0, 0.5));
+
+ }
+
+ @Test
+ public void testRandomWeightedDistributor() throws Exception {
+ FakeFsDirectory[] directories = new FakeFsDirectory[]{
+ new FakeFsDirectory("dir0", 10L),
+ new FakeFsDirectory("dir1", 20L),
+ new FakeFsDirectory("dir2", 30L)
+ };
+ FakeDirectoryService directoryService = new FakeDirectoryService(directories);
+
+ RandomWeightedDistributor randomWeightedDistributor = new RandomWeightedDistributor(directoryService);
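+ // allocations should be roughly proportional to usable space (10:20:30)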
+ for (int i = 0; i < 10000; i++) {
+ ((FakeFsDirectory) randomWeightedDistributor.any()).incrementAllocationCount();
+ }
+ for (FakeFsDirectory directory : directories) {
+ assertThat(directory.getAllocationCount(), greaterThan(0));
+ }
+ assertThat((double) directories[1].getAllocationCount() / directories[0].getAllocationCount(), closeTo(2.0, 0.5));
+ assertThat((double) directories[2].getAllocationCount() / directories[0].getAllocationCount(), closeTo(3.0, 0.5));
+
+ for (FakeFsDirectory directory : directories) {
+ directory.resetAllocationCount();
+ }
+
+ directories[1].setUsableSpace(0L);
+
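+ // dir1 now reports zero usable space, so it should receive no allocations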
+ for (int i = 0; i < 1000; i++) {
+ ((FakeFsDirectory) randomWeightedDistributor.any()).incrementAllocationCount();
+ }
+
+ assertThat(directories[0].getAllocationCount(), greaterThan(0));
+ assertThat(directories[1].getAllocationCount(), equalTo(0));
+ assertThat(directories[2].getAllocationCount(), greaterThan(0));
+
+ }
+
+ public static class FakeDirectoryService implements DirectoryService {
+
+ private final Directory[] directories;
+
+ public FakeDirectoryService(Directory[] directories) {
+ this.directories = directories;
+ }
+
+ @Override
+ public Directory[] build() throws IOException {
+ return directories;
+ }
+
+ @Override
+ public long throttleTimeInNanos() {
+ return 0;
+ }
+
+ @Override
+ public void renameFile(Directory dir, String from, String to) throws IOException {
+ }
+
+ @Override
+ public void fullDelete(Directory dir) throws IOException {
+ }
+ }
+
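+ // FSDirectory whose backing File reports a configurable usable-space value, so
+ // distributor decisions can be driven deterministically from the tests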
+ public static class FakeFsDirectory extends FSDirectory {
+
+ public int allocationCount;
+
+ public FakeFile fakeFile;
+
+ public FakeFsDirectory(String path, long usableSpace) throws IOException {
+ super(new File(path), NoLockFactory.getNoLockFactory());
+ fakeFile = new FakeFile(path, usableSpace);
+ allocationCount = 0;
+ }
+
+ @Override
+ public IndexInput openInput(String name, IOContext context) throws IOException {
+ throw new UnsupportedOperationException("Shouldn't be called in the test");
+ }
+
+ public void setUsableSpace(long usableSpace) {
+ fakeFile.setUsableSpace(usableSpace);
+ }
+
+ public void incrementAllocationCount() {
+ allocationCount++;
+ }
+
+ public int getAllocationCount() {
+ return allocationCount;
+ }
+
+ public void resetAllocationCount() {
+ allocationCount = 0;
+ }
+
+ @Override
+ public File getDirectory() {
+ return fakeFile;
+ }
+ }
+
+ public static class FakeFile extends File {
+ private long usableSpace;
+
+ public FakeFile(String s, long usableSpace) {
+ super(s);
+ this.usableSpace = usableSpace;
+ }
+
+ @Override
+ public long getUsableSpace() {
+ return usableSpace;
+ }
+
+ public void setUsableSpace(long usableSpace) {
+ this.usableSpace = usableSpace;
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/store/memory/SimpleByteBufferStoreTests.java b/src/test/java/org/elasticsearch/index/store/memory/SimpleByteBufferStoreTests.java
new file mode 100644
index 0000000..a252b89
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/store/memory/SimpleByteBufferStoreTests.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.memory;
+
+import org.apache.lucene.store.*;
+import org.apache.lucene.store.bytebuffer.ByteBufferDirectory;
+import org.elasticsearch.cache.memory.ByteBufferCache;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests {@link ByteBufferDirectory} reads, writes and locking across a range of
+ * buffer sizes.
+ */
+public class SimpleByteBufferStoreTests extends ElasticsearchTestCase {
+
+ @Test
+ public void test1BufferNoCache() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(1, 0, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+ insertData(dir, 1);
+ verifyData(dir);
+ dir.close();
+ cache.close();
+ }
+
+ @Test
+ public void test1Buffer() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(1, 10, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+ insertData(dir, 1);
+ verifyData(dir);
+ dir.close();
+ cache.close();
+ }
+
+ @Test
+ public void test3Buffer() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(3, 10, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+ insertData(dir, 3);
+ verifyData(dir);
+ dir.close();
+ cache.close();
+ }
+
+ @Test
+ public void test10Buffer() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(10, 20, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+ insertData(dir, 10);
+ verifyData(dir);
+ dir.close();
+ cache.close();
+ }
+
+ @Test
+ public void test15Buffer() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(15, 30, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+ insertData(dir, 15);
+ verifyData(dir);
+ dir.close();
+ cache.close();
+ }
+
+ @Test
+ public void test40Buffer() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(40, 80, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+ insertData(dir, 40);
+ verifyData(dir);
+ dir.close();
+ cache.close();
+ }
+
+ @Test
+ public void testSimpleLocking() throws Exception {
+ ByteBufferCache cache = new ByteBufferCache(40, 80, true);
+ ByteBufferDirectory dir = new ByteBufferDirectory(cache);
+
+ Lock lock = dir.makeLock("testlock");
+
+ assertThat(lock.isLocked(), equalTo(false));
+ assertThat(lock.obtain(200), equalTo(true));
+ assertThat(lock.isLocked(), equalTo(true));
+ try {
+ assertThat(lock.obtain(200), equalTo(false));
+ assertThat("lock should be thrown", false, equalTo(true));
+ } catch (LockObtainFailedException e) {
+ // all is well
+ }
+ lock.release();
+ assertThat(lock.isLocked(), equalTo(false));
+ dir.close();
+ cache.close();
+ }
+
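+ // Writes 38 bytes in total (5 + 4 + 8 + 4 + 4 + 8 + 5), then seeks back to
+ // patch byte 0 and, when the buffer size exceeds 4 bytes, bytes 2-3.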
+ private void insertData(ByteBufferDirectory dir, int bufferSizeInBytes) throws IOException {
+ byte[] test = new byte[]{1, 2, 3, 4, 5, 6, 7, 8};
+ IndexOutput indexOutput = dir.createOutput("value1", IOContext.DEFAULT);
+ indexOutput.writeBytes(new byte[]{2, 4, 6, 7, 8}, 5);
+ indexOutput.writeInt(-1);
+ indexOutput.writeLong(10);
+ indexOutput.writeInt(0);
+ indexOutput.writeInt(0);
+ indexOutput.writeBytes(test, 8);
+ indexOutput.writeBytes(test, 5);
+
+ indexOutput.seek(0);
+ indexOutput.writeByte((byte) 8);
+ if (bufferSizeInBytes > 4) {
+ indexOutput.seek(2);
+ indexOutput.writeBytes(new byte[]{1, 2}, 2);
+ }
+
+ indexOutput.close();
+ }
+
+ private void verifyData(ByteBufferDirectory dir) throws IOException {
+ byte[] test = new byte[]{1, 2, 3, 4, 5, 6, 7, 8};
+ assertThat(dir.fileExists("value1"), equalTo(true));
+ assertThat(dir.fileLength("value1"), equalTo(38L));
+
+ IndexInput indexInput = dir.openInput("value1", IOContext.DEFAULT);
+ indexInput.readBytes(test, 0, 5);
+ assertThat(test[0], equalTo((byte) 8));
+ assertThat(indexInput.readInt(), equalTo(-1));
+ assertThat(indexInput.readLong(), equalTo((long) 10));
+ assertThat(indexInput.readInt(), equalTo(0));
+ assertThat(indexInput.readInt(), equalTo(0));
+ indexInput.readBytes(test, 0, 8);
+ assertThat(test[0], equalTo((byte) 1));
+ assertThat(test[7], equalTo((byte) 8));
+ indexInput.readBytes(test, 0, 5);
+ assertThat(test[0], equalTo((byte) 1));
+ assertThat(test[4], equalTo((byte) 5));
+
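+ // positions 28 and 30 fall inside the 8-byte block written at offset 25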
+ indexInput.seek(28);
+ assertThat(indexInput.readByte(), equalTo((byte) 4));
+ indexInput.seek(30);
+ assertThat(indexInput.readByte(), equalTo((byte) 6));
+
+ indexInput.seek(0);
+ indexInput.readBytes(test, 0, 5);
+ assertThat(test[0], equalTo((byte) 8));
+
+ indexInput.close();
+
+ indexInput = dir.openInput("value1", IOContext.DEFAULT);
+ // iterate over all the data
+ for (int i = 0; i < 38; i++) {
+ indexInput.readByte();
+ }
+ indexInput.close();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java
new file mode 100644
index 0000000..b6c9190
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java
@@ -0,0 +1,267 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.apache.lucene.index.Term;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.hamcrest.MatcherAssert;
+import org.hamcrest.Matchers;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Shared translog test cases; subclasses supply the concrete {@link Translog}
+ * implementation via {@link #create()}.
+ */
+public abstract class AbstractSimpleTranslogTests {
+
+ protected final ShardId shardId = new ShardId(new Index("index"), 1);
+
+ protected Translog translog;
+
+ @Before
+ public void setUp() {
+ translog = create();
+ translog.newTranslog(1);
+ }
+
+ @After
+ public void tearDown() {
+ translog.closeWithDelete();
+ }
+
+ protected abstract Translog create();
+
+ @Test
+ public void testRead() throws IOException {
+ Translog.Location loc1 = translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ Translog.Location loc2 = translog.add(new Translog.Create("test", "2", new byte[]{2}));
+ assertThat(TranslogStreams.readSource(translog.read(loc1)).source.toBytesArray(), equalTo(new BytesArray(new byte[]{1})));
+ assertThat(TranslogStreams.readSource(translog.read(loc2)).source.toBytesArray(), equalTo(new BytesArray(new byte[]{2})));
+ translog.sync();
+ assertThat(TranslogStreams.readSource(translog.read(loc1)).source.toBytesArray(), equalTo(new BytesArray(new byte[]{1})));
+ assertThat(TranslogStreams.readSource(translog.read(loc2)).source.toBytesArray(), equalTo(new BytesArray(new byte[]{2})));
+ Translog.Location loc3 = translog.add(new Translog.Create("test", "2", new byte[]{3}));
+ assertThat(TranslogStreams.readSource(translog.read(loc3)).source.toBytesArray(), equalTo(new BytesArray(new byte[]{3})));
+ translog.sync();
+ assertThat(TranslogStreams.readSource(translog.read(loc3)).source.toBytesArray(), equalTo(new BytesArray(new byte[]{3})));
+ }
+
+ @Test
+ public void testTransientTranslog() {
+ Translog.Snapshot snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ snapshot.release();
+
+ translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+ snapshot.release();
+
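+ // a transient translog receives new operations alongside the current one
+ // until makeTransientCurrent() swaps it in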
+ translog.newTransientTranslog(2);
+
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+ snapshot.release();
+
+ translog.add(new Translog.Index("test", "2", new byte[]{2}));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(2));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(2));
+ snapshot.release();
+
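+ // after the swap, only operations added since newTransientTranslog(2) remain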
+ translog.makeTransientCurrent();
+
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); // now it's one, since it only includes "2"
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+ snapshot.release();
+ }
+
+ @Test
+ public void testSimpleOperations() {
+ Translog.Snapshot snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ snapshot.release();
+
+ translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+ snapshot.release();
+
+ translog.add(new Translog.Index("test", "2", new byte[]{2}));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(2));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(2));
+ snapshot.release();
+
+ translog.add(new Translog.Delete(newUid("3")));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(3));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(3));
+ snapshot.release();
+
+ translog.add(new Translog.DeleteByQuery(new BytesArray(new byte[]{4}), null));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(4));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(4));
+ snapshot.release();
+
+ snapshot = translog.snapshot();
+
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Create create = (Translog.Create) snapshot.next();
+ assertThat(create.source().toBytes(), equalTo(new byte[]{1}));
+
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Index index = (Translog.Index) snapshot.next();
+ assertThat(index.source().toBytes(), equalTo(new byte[]{2}));
+
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Delete delete = (Translog.Delete) snapshot.next();
+ assertThat(delete.uid(), equalTo(newUid("3")));
+
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.DeleteByQuery deleteByQuery = (Translog.DeleteByQuery) snapshot.next();
+ assertThat(deleteByQuery.source().toBytes(), equalTo(new byte[]{4}));
+
+ assertThat(snapshot.hasNext(), equalTo(false));
+
+ snapshot.release();
+
+ long firstId = translog.currentId();
+ translog.newTranslog(2);
+ assertThat(translog.currentId(), Matchers.not(equalTo(firstId)));
+
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(0));
+ snapshot.release();
+ }
+
+ @Test
+ public void testSnapshot() {
+ Translog.Snapshot snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ snapshot.release();
+
+ translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+ snapshot.release();
+
+ snapshot = translog.snapshot();
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Create create = (Translog.Create) snapshot.next();
+ assertThat(create.source().toBytes(), equalTo(new byte[]{1}));
+ snapshot.release();
+
+ Translog.Snapshot snapshot1 = translog.snapshot();
+ // matching against translogSize also advances this snapshot to its last
+ // position, so that snapshot(snapshot1) below only sees newer operations
+ MatcherAssert.assertThat(snapshot1, TranslogSizeMatcher.translogSize(1));
+ assertThat(snapshot1.estimatedTotalOperations(), equalTo(1));
+
+ translog.add(new Translog.Index("test", "2", new byte[]{2}));
+ snapshot = translog.snapshot(snapshot1);
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(2));
+ snapshot.release();
+
+ snapshot = translog.snapshot(snapshot1);
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Index index = (Translog.Index) snapshot.next();
+ assertThat(index.source().toBytes(), equalTo(new byte[]{2}));
+ assertThat(snapshot.hasNext(), equalTo(false));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(2));
+ snapshot.release();
+ snapshot1.release();
+ }
+
+ @Test
+ public void testSnapshotWithNewTranslog() {
+ Translog.Snapshot snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ snapshot.release();
+
+ translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ Translog.Snapshot actualSnapshot = translog.snapshot();
+
+ translog.add(new Translog.Index("test", "2", new byte[]{2}));
+
+ translog.newTranslog(2);
+
+ translog.add(new Translog.Index("test", "3", new byte[]{3}));
+
+ snapshot = translog.snapshot(actualSnapshot);
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ snapshot.release();
+
+ snapshot = translog.snapshot(actualSnapshot);
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Index index = (Translog.Index) snapshot.next();
+ assertThat(index.source().toBytes(), equalTo(new byte[]{3}));
+ assertThat(snapshot.hasNext(), equalTo(false));
+
+ actualSnapshot.release();
+ snapshot.release();
+ }
+
+ @Test
+ public void testSnapshotWithSeekForward() {
+ Translog.Snapshot snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+ snapshot.release();
+
+ translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ snapshot = translog.snapshot();
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ long lastPosition = snapshot.position();
+ snapshot.release();
+
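+ // seeking forward past the recorded position should skip the first operation
+ // and expose only the create added below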
+ translog.add(new Translog.Create("test", "2", new byte[]{1}));
+ snapshot = translog.snapshot();
+ snapshot.seekForward(lastPosition);
+ MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+ snapshot.release();
+
+ snapshot = translog.snapshot();
+ snapshot.seekForward(lastPosition);
+ assertThat(snapshot.hasNext(), equalTo(true));
+ Translog.Create create = (Translog.Create) snapshot.next();
+ assertThat(create.id(), equalTo("2"));
+ snapshot.release();
+ }
+
+ private Term newUid(String id) {
+ return new Term("_uid", id);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java b/src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java
new file mode 100644
index 0000000..03cf701
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;
+
+/**
+ * Hamcrest matcher asserting the number of operations in a {@link Translog.Snapshot}.
+ * Note that matching iterates the snapshot, advancing it to its last position.
+ */
+public class TranslogSizeMatcher extends TypeSafeMatcher<Translog.Snapshot> {
+
+ private final int size;
+
+ public TranslogSizeMatcher(int size) {
+ this.size = size;
+ }
+
+ @Override
+ public boolean matchesSafely(Translog.Snapshot snapshot) {
+ int count = 0;
+ while (snapshot.hasNext()) {
+ snapshot.next();
+ count++;
+ }
+ return size == count;
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("a translog with size ").appendValue(size);
+ }
+
+ public static Matcher<Translog.Snapshot> translogSize(int size) {
+ return new TranslogSizeMatcher(size);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java
new file mode 100644
index 0000000..7802373
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog.fs;
+
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.translog.AbstractSimpleTranslogTests;
+import org.elasticsearch.index.translog.Translog;
+import org.junit.AfterClass;
+
+import java.io.File;
+
+/**
+ * Runs the shared translog tests against a buffered file system translog.
+ */
+public class FsBufferedTranslogTests extends AbstractSimpleTranslogTests {
+
+ @Override
+ protected Translog create() {
+ return new FsTranslog(shardId,
+ ImmutableSettings.settingsBuilder().put("index.translog.fs.type", FsTranslogFile.Type.BUFFERED.name()).build(),
+ new File("data/fs-translog"));
+ }
+
+ @AfterClass
+ public static void cleanup() {
+ FileSystemUtils.deleteRecursively(new File("data/fs-translog"), true);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java
new file mode 100644
index 0000000..c826247
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog.fs;
+
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.translog.AbstractSimpleTranslogTests;
+import org.elasticsearch.index.translog.Translog;
+import org.junit.AfterClass;
+
+import java.io.File;
+
+/**
+ * Runs the shared translog tests against a simple (unbuffered) file system translog.
+ */
+public class FsSimpleTranslogTests extends AbstractSimpleTranslogTests {
+
+ @Override
+ protected Translog create() {
+ return new FsTranslog(shardId,
+ ImmutableSettings.settingsBuilder().put("index.translog.fs.type", FsTranslogFile.Type.SIMPLE.name()).build(),
+ new File("data/fs-translog"));
+ }
+
+ @AfterClass
+ public static void cleanup() {
+ FileSystemUtils.deleteRecursively(new File("data/fs-translog"), true);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indexing/IndexActionTests.java b/src/test/java/org/elasticsearch/indexing/IndexActionTests.java
new file mode 100644
index 0000000..5dbb954
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indexing/IndexActionTests.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indexing;
+
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicIntegerArray;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+/**
+ * Integration tests for the created flag reported by index, bulk and versioned
+ * index operations.
+ */
+public class IndexActionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testCreatedFlag() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").execute().actionGet();
+ assertTrue(indexResponse.isCreated());
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").execute().actionGet();
+ assertFalse(indexResponse.isCreated());
+
+ client().prepareDelete("test", "type", "1").execute().actionGet();
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").execute().actionGet();
+ assertTrue(indexResponse.isCreated());
+
+ }
+
+ @Test
+ public void testCreatedFlagWithFlush() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").execute().actionGet();
+ assertTrue(indexResponse.isCreated());
+
+ client().prepareDelete("test", "type", "1").execute().actionGet();
+
+ flush();
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").execute().actionGet();
+ assertTrue(indexResponse.isCreated());
+ }
+
+ @Test
+ public void testCreatedFlagParallelExecution() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ int threadCount = 20;
+ final int docCount = 300;
+ int taskCount = docCount * threadCount;
+
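+ // many tasks race to index the same doc ids; only the first write of a given
+ // id may report isCreated() == true, so each counter must end up at most 1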
+ final AtomicIntegerArray createdCounts = new AtomicIntegerArray(docCount);
+ ExecutorService threadPool = Executors.newFixedThreadPool(threadCount);
+ List<Callable<Void>> tasks = new ArrayList<Callable<Void>>(taskCount);
+ final Random random = getRandom();
+ for (int i=0;i< taskCount; i++ ) {
+ tasks.add(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ int docId = random.nextInt(docCount);
+ IndexResponse indexResponse = index("test", "type", Integer.toString(docId), "field1", "value");
+ if (indexResponse.isCreated()) {
+ createdCounts.incrementAndGet(docId);
+ }
+ return null;
+ }
+ });
+ }
+
+ threadPool.invokeAll(tasks);
+
+ for (int i=0;i<docCount;i++) {
+ assertThat(createdCounts.get(i), lessThanOrEqualTo(1));
+ }
+ }
+
+ @Test
+ public void testCreatedFlagWithExternalVersioning() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(123)
+ .setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertTrue(indexResponse.isCreated());
+ }
+
+ @Test
+ public void testCreateFlagWithBulk() {
+ createIndex("test");
+ ensureGreen();
+
+ BulkResponse bulkResponse = client().prepareBulk().add(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1")).execute().actionGet();
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(1));
+ IndexResponse indexResponse = bulkResponse.getItems()[0].getResponse();
+ assertTrue(indexResponse.isCreated());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionTests.java b/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionTests.java
new file mode 100644
index 0000000..3a8ebdf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionTests.java
@@ -0,0 +1,404 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indexlifecycle;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.google.common.base.Function;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.TestCluster;
+import org.junit.Test;
+
+import java.util.Set;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+
+/**
+ * Integration tests verifying shard allocation and relocation as nodes join and
+ * leave the cluster, with and without replicas.
+ */
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class IndexLifecycleActionTests extends ElasticsearchIntegrationTest {
+
+ @Slow
+ @Test
+ public void testIndexLifecycleActions() throws Exception {
+ if (randomBoolean()) { // pick one variant per run; both run individually when @Nightly tests are enabled
+ testIndexLifecycleActionsWith11Shards0Backup();
+ } else {
+ testIndexLifecycleActionsWith11Shards1Backup();
+ }
+ }
+
+ @Slow
+ @Nightly
+ @Test
+ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception {
+ Settings settings = settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 11)
+ .put(SETTING_NUMBER_OF_REPLICAS, 1)
+ .put("cluster.routing.schedule", "20ms") // reroute every 20ms so we identify new nodes fast
+ .build();
+
+ // start one server
+ logger.info("Starting sever1");
+ final String server_1 = cluster().startNode(settings);
+ final String node1 = getLocalNodeId(server_1);
+
+ logger.info("Creating index [test]");
+ CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test")).actionGet();
+ assertThat(createIndexResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+ RoutingNode routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11));
+
+ logger.info("Starting server2");
+ // start another server
+ String server_2 = cluster().startNode(settings);
+
+ // first wait for 2 nodes in the cluster
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ setMinimumMasterNodes(2);
+ final String node2 = getLocalNodeId(server_2);
+
+ // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
+ client().admin().cluster().prepareReroute().execute().actionGet();
+
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2").waitForRelocatingShards(0)).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(2));
+ assertThat(clusterHealth.getInitializingShards(), equalTo(0));
+ assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(22));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node1, node2);
+ routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+ assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11));
+ RoutingNode routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ assertThat(routingNodeEntry2.numberOfShardsWithState(INITIALIZING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), equalTo(11));
+
+ logger.info("Starting server3");
+ // start another server
+ String server_3 = cluster().startNode(settings);
+
+ // first wait for 3 nodes in the cluster
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ final String node3 = getLocalNodeId(server_3);
+
+ // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
+ client().admin().cluster().prepareReroute().execute().actionGet();
+
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3").waitForRelocatingShards(0)).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(3));
+ assertThat(clusterHealth.getInitializingShards(), equalTo(0));
+ assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(22));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node1, node2, node3);
+
+ routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+ routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ RoutingNode routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED) + routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(22));
+
+ assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), anyOf(equalTo(7), equalTo(8)));
+
+ assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(7), equalTo(8)));
+
+ assertThat(routingNodeEntry3.numberOfShardsWithState(INITIALIZING), equalTo(0));
+ assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(7));
+
+ logger.info("Closing server1");
+ // kill the first server
+ cluster().stopRandomNode(TestCluster.nameFilter(server_1));
+ // verify health
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ client().admin().cluster().prepareReroute().get();
+
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForRelocatingShards(0).waitForNodes("2")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(22));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node3, node2);
+ routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(22));
+
+ assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), equalTo(11));
+
+ assertThat(routingNodeEntry3.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(11));
+
+
+ logger.info("Deleting index [test]");
+ // last, lets delete the index
+ DeleteIndexResponse deleteIndexResponse = client().admin().indices().prepareDelete("test").execute().actionGet();
+ assertThat(deleteIndexResponse.isAcknowledged(), equalTo(true));
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node3, node2);
+ routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ assertThat(routingNodeEntry2.isEmpty(), equalTo(true));
+
+ routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+ assertThat(routingNodeEntry3.isEmpty(), equalTo(true));
+ }
+
+ private String getLocalNodeId(String name) {
+ Discovery discovery = cluster().getInstance(Discovery.class, name);
+ String nodeId = discovery.localNode().getId();
+ assertThat(nodeId, not(nullValue()));
+ return nodeId;
+ }
+
+ @Slow
+ @Nightly
+ @Test
+ public void testIndexLifecycleActionsWith11Shards0Backup() throws Exception {
+
+ Settings settings = settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 11)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("cluster.routing.schedule", "20ms") // reroute every 20ms so we identify new nodes fast
+ .build();
+
+ // start one server
+ logger.info("Starting server1");
+ final String server_1 = cluster().startNode(settings);
+
+ final String node1 = getLocalNodeId(server_1);
+
+ logger.info("Creating index [test]");
+ CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test")).actionGet();
+ assertThat(createIndexResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(11));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+ ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node1);
+ RoutingNode routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11));
+
+ // start another server
+ logger.info("Starting server2");
+ final String server_2 = cluster().startNode(settings);
+
+ // first wait for 2 nodes in the cluster
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ setMinimumMasterNodes(2);
+ final String node2 = getLocalNodeId(server_2);
+
+ // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
+ client().admin().cluster().prepareReroute().execute().actionGet();
+
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForRelocatingShards(0).waitForNodes("2")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(2));
+ assertThat(clusterHealth.getInitializingShards(), equalTo(0));
+ assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(11));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node1, node2);
+ routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+ assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), anyOf(equalTo(6), equalTo(5)));
+ RoutingNode routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ assertThat(routingNodeEntry2.numberOfShardsWithState(INITIALIZING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(6)));
+
+ // start another server
+ logger.info("Starting server3");
+ final String server_3 = cluster().startNode();
+
+ // first wait for 3 nodes in the cluster
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ final String node3 = getLocalNodeId(server_3);
+ // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
+ client().admin().cluster().prepareReroute().execute().actionGet();
+
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3").waitForRelocatingShards(0)).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(3));
+ assertThat(clusterHealth.getInitializingShards(), equalTo(0));
+ assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(11));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node1, node2, node3);
+ routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+ routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ RoutingNode routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED) + routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(11));
+
+ assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(4), equalTo(3)));
+
+ assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(4), equalTo(3)));
+
+ assertThat(routingNodeEntry3.numberOfShardsWithState(INITIALIZING), equalTo(0));
+ assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(3));
+
+ logger.info("Closing server1");
+ // kill the first server
+ cluster().stopRandomNode(TestCluster.nameFilter(server_1));
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ client().admin().cluster().prepareReroute().get();
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2").waitForRelocatingShards(0)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+ assertThat(clusterHealth.getActiveShards(), equalTo(11));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node3, node2);
+
+ routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(11));
+
+ assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(INITIALIZING), equalTo(0));
+ assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(6)));
+
+ assertThat(routingNodeEntry3.numberOfShardsWithState(RELOCATING), equalTo(0));
+ assertThat(routingNodeEntry3.numberOfShardsWithState(INITIALIZING), equalTo(0));
+ assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), anyOf(equalTo(5), equalTo(6)));
+
+ logger.info("Deleting index [test]");
+ // last, lets delete the index
+ DeleteIndexResponse deleteIndexResponse = client().admin().indices().delete(deleteIndexRequest("test")).actionGet();
+ assertThat(deleteIndexResponse.isAcknowledged(), equalTo(true));
+
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ assertNodesPresent(clusterState.readOnlyRoutingNodes(), node3, node2);
+
+ routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+ assertThat(routingNodeEntry2.isEmpty(), equalTo(true));
+
+ routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+ assertThat(routingNodeEntry3.isEmpty(), equalTo(true));
+ }
+
+ private void assertNodesPresent(RoutingNodes routingNodes, String... nodes) {
+ final Set<String> keySet = Sets.newHashSet(Iterables.transform(routingNodes, new Function<RoutingNode, String>() {
+ @Override
+ public String apply(RoutingNode input) {
+ return input.nodeId();
+ }
+ }));
+ assertThat(keySet, containsInAnyOrder(nodes));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerTests.java b/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerTests.java
new file mode 100644
index 0000000..77762e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerTests.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION;
+import static org.elasticsearch.common.settings.ImmutableSettings.builder;
+import static org.elasticsearch.index.shard.IndexShardState.*;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class IndicesLifecycleListenerTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testIndexStateShardChanged() throws Throwable {
+
+ //start with a single node
+ String node1 = cluster().startNode();
+ IndexShardStateChangeListener stateChangeListenerNode1 = new IndexShardStateChangeListener();
+ //add a listener that keeps track of the shard state changes
+ cluster().getInstance(IndicesLifecycle.class, node1).addListener(stateChangeListenerNode1);
+
+ //create an index
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 6, SETTING_NUMBER_OF_REPLICAS, 0));
+ ensureGreen();
+
+ //new shards got started
+ assertShardStatesMatch(stateChangeListenerNode1, 6, CREATED, RECOVERING, POST_RECOVERY, STARTED);
+
+ //add a node: 3 out of the 6 shards will be relocated to it
+ //disable allocation before starting a new node, as we need to register the listener first
+ assertAcked(client().admin().cluster().prepareUpdateSettings()
+ .setPersistentSettings(builder().put(CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)));
+ String node2 = cluster().startNode();
+ IndexShardStateChangeListener stateChangeListenerNode2 = new IndexShardStateChangeListener();
+ //add a listener that keeps track of the shard state changes
+ cluster().getInstance(IndicesLifecycle.class, node2).addListener(stateChangeListenerNode2);
+ //re-enable allocation
+ assertAcked(client().admin().cluster().prepareUpdateSettings()
+ .setPersistentSettings(builder().put(CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, false)));
+ ensureGreen();
+
+ //the 3 relocated shards get closed on the first node
+ assertShardStatesMatch(stateChangeListenerNode1, 3, CLOSED);
+ //the 3 relocated shards get created on the second node
+ assertShardStatesMatch(stateChangeListenerNode2, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED);
+
+ //increase replicas from 0 to 1
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(builder().put(SETTING_NUMBER_OF_REPLICAS, 1)));
+ ensureGreen();
+
+ //3 replicas are allocated to the first node
+ assertShardStatesMatch(stateChangeListenerNode1, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED);
+
+ //3 replicas are allocated to the second node
+ assertShardStatesMatch(stateChangeListenerNode2, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED);
+
+ //close the index
+ assertAcked(client().admin().indices().prepareClose("test"));
+
+ assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED);
+ assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED);
+ }
+
+ private static void assertShardStatesMatch(final IndexShardStateChangeListener stateChangeListener, final int numShards, final IndexShardState... shardStates)
+ throws InterruptedException {
+
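+ // poll for up to a minute until every tracked shard has gone through exactly the expected sequence of states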
+ Predicate<Object> waitPredicate = new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ if (stateChangeListener.shardStates.size() != numShards) {
+ return false;
+ }
+ for (List<IndexShardState> indexShardStates : stateChangeListener.shardStates.values()) {
+ if (indexShardStates == null || indexShardStates.size() != shardStates.length) {
+ return false;
+ }
+ for (int i = 0; i < shardStates.length; i++) {
+ if (indexShardStates.get(i) != shardStates[i]) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+ };
+ if (!awaitBusy(waitPredicate, 1, TimeUnit.MINUTES)) {
+ fail("failed to observe expect shard states\n" +
+ "expected: [" + numShards + "] shards with states: " + Strings.arrayToCommaDelimitedString(shardStates) + "\n" +
+ "observed:\n" + stateChangeListener);
+ }
+
+ stateChangeListener.shardStates.clear();
+ }
+
+ private static class IndexShardStateChangeListener extends IndicesLifecycle.Listener {
+ //we keep track of all the states (ordered) a shard goes through
+ final ConcurrentMap<ShardId, List<IndexShardState>> shardStates = Maps.newConcurrentMap();
+
+ @Override
+ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState newState, @Nullable String reason) {
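+ // putIfAbsent returns the previously mapped list, if any; append to it, otherwise the new single-element list was just stored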
+ List<IndexShardState> shardStates = this.shardStates.putIfAbsent(indexShard.shardId(),
+ new CopyOnWriteArrayList<IndexShardState>(new IndexShardState[]{newState}));
+ if (shardStates != null) {
+ shardStates.add(newState);
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for (Map.Entry<ShardId, List<IndexShardState>> entry : shardStates.entrySet()) {
+ sb.append(entry.getKey()).append(" --> ").append(Strings.collectionToCommaDelimitedString(entry.getValue())).append("\n");
+ }
+ return sb.toString();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/IndicesOptionsTests.java b/src/test/java/org/elasticsearch/indices/IndicesOptionsTests.java
new file mode 100644
index 0000000..357dbed
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/IndicesOptionsTests.java
@@ -0,0 +1,919 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequestBuilder;
+import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
+import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequestBuilder;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeRequestBuilder;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusRequestBuilder;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder;
+import org.elasticsearch.action.count.CountRequestBuilder;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder;
+import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
+import org.elasticsearch.action.percolate.PercolateRequestBuilder;
+import org.elasticsearch.action.percolate.PercolateSourceBuilder;
+import org.elasticsearch.action.search.MultiSearchRequestBuilder;
+import org.elasticsearch.action.search.MultiSearchResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.suggest.SuggestRequestBuilder;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.suggest.SuggestBuilder;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+public class IndicesOptionsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSpecifiedIndexUnavailable() throws Exception {
+ assertAcked(prepareCreate("test1"));
+ ensureYellow();
+
+ // Verify defaults
+ verify(search("test1", "test2"), true);
+ verify(msearch(null, "test1", "test2"), true);
+ verify(count("test1", "test2"), true);
+ verify(clearCache("test1", "test2"), true);
+ verify(_flush("test1", "test2"),true);
+ verify(gatewatSnapshot("test1", "test2"), true);
+ verify(segments("test1", "test2"), true);
+ verify(stats("test1", "test2"), true);
+ verify(status("test1", "test2"), true);
+ verify(optimize("test1", "test2"), true);
+ verify(refresh("test1", "test2"), true);
+ verify(validateQuery("test1", "test2"), true);
+ verify(aliasExists("test1", "test2"), true);
+ verify(typesExists("test1", "test2"), true);
+ verify(deleteByQuery("test1", "test2"), true);
+ verify(percolate("test1", "test2"), true);
+ verify(mpercolate(null, "test1", "test2"), false);
+ verify(suggest("test1", "test2"), true);
+ verify(getAliases("test1", "test2"), true);
+ verify(getFieldMapping("test1", "test2"), true);
+ verify(getMapping("test1", "test2"), true);
+ verify(getWarmer("test1", "test2"), true);
+ verify(getSettings("test1", "test2"), true);
+
+ IndicesOptions options = IndicesOptions.strict();
+ verify(search("test1", "test2").setIndicesOptions(options), true);
+ verify(msearch(options, "test1", "test2"), true);
+ verify(count("test1", "test2").setIndicesOptions(options), true);
+ verify(clearCache("test1", "test2").setIndicesOptions(options), true);
+ verify(_flush("test1", "test2").setIndicesOptions(options),true);
+ verify(gatewatSnapshot("test1", "test2").setIndicesOptions(options), true);
+ verify(segments("test1", "test2").setIndicesOptions(options), true);
+ verify(stats("test1", "test2").setIndicesOptions(options), true);
+ verify(status("test1", "test2").setIndicesOptions(options), true);
+ verify(optimize("test1", "test2").setIndicesOptions(options), true);
+ verify(refresh("test1", "test2").setIndicesOptions(options), true);
+ verify(validateQuery("test1", "test2").setIndicesOptions(options), true);
+ verify(aliasExists("test1", "test2").setIndicesOptions(options), true);
+ verify(typesExists("test1", "test2").setIndicesOptions(options), true);
+ verify(deleteByQuery("test1", "test2").setIndicesOptions(options), true);
+ verify(percolate("test1", "test2").setIndicesOptions(options), true);
+ verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(suggest("test1", "test2").setIndicesOptions(options), true);
+ verify(getAliases("test1", "test2").setIndicesOptions(options), true);
+ verify(getFieldMapping("test1", "test2").setIndicesOptions(options), true);
+ verify(getMapping("test1", "test2").setIndicesOptions(options), true);
+ verify(getWarmer("test1", "test2").setIndicesOptions(options), true);
+ verify(getSettings("test1", "test2").setIndicesOptions(options), true);
+
+ options = IndicesOptions.lenient();
+ verify(search("test1", "test2").setIndicesOptions(options), false);
+ verify(msearch(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(count("test1", "test2").setIndicesOptions(options), false);
+ verify(clearCache("test1", "test2").setIndicesOptions(options), false);
+ verify(_flush("test1", "test2").setIndicesOptions(options), false);
+ verify(gatewatSnapshot("test1", "test2").setIndicesOptions(options), false);
+ verify(segments("test1", "test2").setIndicesOptions(options), false);
+ verify(stats("test1", "test2").setIndicesOptions(options), false);
+ verify(status("test1", "test2").setIndicesOptions(options), false);
+ verify(optimize("test1", "test2").setIndicesOptions(options), false);
+ verify(refresh("test1", "test2").setIndicesOptions(options), false);
+ verify(validateQuery("test1", "test2").setIndicesOptions(options), false);
+ verify(aliasExists("test1", "test2").setIndicesOptions(options), false);
+ verify(typesExists("test1", "test2").setIndicesOptions(options), false);
+ verify(deleteByQuery("test1", "test2").setIndicesOptions(options), false);
+ verify(percolate("test1", "test2").setIndicesOptions(options), false);
+ verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(suggest("test1", "test2").setIndicesOptions(options), false);
+ verify(getAliases("test1", "test2").setIndicesOptions(options), false);
+ verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false);
+ verify(getMapping("test1", "test2").setIndicesOptions(options), false);
+ verify(getWarmer("test1", "test2").setIndicesOptions(options), false);
+ verify(getSettings("test1", "test2").setIndicesOptions(options), false);
+
+ options = IndicesOptions.strict();
+ assertAcked(prepareCreate("test2"));
+ ensureYellow();
+ verify(search("test1", "test2").setIndicesOptions(options), false);
+ verify(msearch(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(count("test1", "test2").setIndicesOptions(options), false);
+ verify(clearCache("test1", "test2").setIndicesOptions(options), false);
+ verify(_flush("test1", "test2").setIndicesOptions(options),false);
+ verify(gatewatSnapshot("test1", "test2").setIndicesOptions(options), false);
+ verify(segments("test1", "test2").setIndicesOptions(options), false);
+ verify(stats("test1", "test2").setIndicesOptions(options), false);
+ verify(status("test1", "test2").setIndicesOptions(options), false);
+ verify(optimize("test1", "test2").setIndicesOptions(options), false);
+ verify(refresh("test1", "test2").setIndicesOptions(options), false);
+ verify(validateQuery("test1", "test2").setIndicesOptions(options), false);
+ verify(aliasExists("test1", "test2").setIndicesOptions(options), false);
+ verify(typesExists("test1", "test2").setIndicesOptions(options), false);
+ verify(deleteByQuery("test1", "test2").setIndicesOptions(options), false);
+ verify(percolate("test1", "test2").setIndicesOptions(options), false);
+ verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(suggest("test1", "test2").setIndicesOptions(options), false);
+ verify(getAliases("test1", "test2").setIndicesOptions(options), false);
+ verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false);
+ verify(getMapping("test1", "test2").setIndicesOptions(options), false);
+ verify(getWarmer("test1", "test2").setIndicesOptions(options), false);
+ verify(getSettings("test1", "test2").setIndicesOptions(options), false);
+ }
+
+ @Test
+ public void testSpecifiedIndexUnavailable_snapshotRestore() throws Exception {
+ assertAcked(prepareCreate("test1"));
+ ensureYellow();
+
+ PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("dummy-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+ client().admin().cluster().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get();
+
+ verify(snapshot("snap2", "test1", "test2"), true);
+ verify(restore("snap1", "test1", "test2"), true);
+
+ IndicesOptions options = IndicesOptions.strict();
+ verify(snapshot("snap2", "test1", "test2").setIndicesOptions(options), true);
+ verify(restore("snap1", "test1", "test2").setIndicesOptions(options), true);
+
+ options = IndicesOptions.lenient();
+ verify(snapshot("snap2", "test1", "test2").setIndicesOptions(options), false);
+ verify(restore("snap2", "test1", "test2").setIndicesOptions(options), false);
+
+ options = IndicesOptions.strict();
+ assertAcked(prepareCreate("test2"));
+ ensureYellow();
+ verify(snapshot("snap3", "test1", "test2").setIndicesOptions(options), false);
+ verify(restore("snap3", "test1", "test2").setIndicesOptions(options), false);
+ }
+
+ @Test
+ public void testWildcardBehaviour() throws Exception {
+ // Verify defaults for wildcards, when specifying no indices (*, _all, /)
+ String[] indices = Strings.EMPTY_ARRAY;
+ verify(search(indices), false);
+ verify(msearch(null, indices), false);
+ verify(count(indices), false);
+ verify(clearCache(indices), false);
+ verify(_flush(indices), false);
+ verify(gatewaySnapshot(indices), false);
+ verify(segments(indices), true);
+ verify(stats(indices), false);
+ verify(status(indices), false);
+ verify(optimize(indices), false);
+ verify(refresh(indices), false);
+ verify(validateQuery(indices), true);
+ verify(aliasExists(indices), false);
+ verify(typesExists(indices), false);
+ verify(deleteByQuery(indices), true);
+ verify(percolate(indices), false);
+ verify(mpercolate(null, indices), false);
+ verify(suggest(indices), false);
+ verify(getAliases(indices), false);
+ verify(getFieldMapping(indices), false);
+ verify(getMapping(indices), false);
+ verify(getWarmer(indices), false);
+ verify(getSettings(indices), false);
+
+ // Now force allow_no_indices=true
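+ // fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed)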
+ IndicesOptions options = IndicesOptions.fromOptions(false, true, true, false);
+ verify(search(indices).setIndicesOptions(options), false);
+ verify(msearch(options, indices).setIndicesOptions(options), false);
+ verify(count(indices).setIndicesOptions(options), false);
+ verify(clearCache(indices).setIndicesOptions(options), false);
+ verify(_flush(indices).setIndicesOptions(options), false);
+ verify(gatewaySnapshot(indices).setIndicesOptions(options), false);
+ verify(segments(indices).setIndicesOptions(options), false);
+ verify(stats(indices).setIndicesOptions(options), false);
+ verify(status(indices).setIndicesOptions(options), false);
+ verify(optimize(indices).setIndicesOptions(options), false);
+ verify(refresh(indices).setIndicesOptions(options), false);
+ verify(validateQuery(indices).setIndicesOptions(options), false);
+ verify(aliasExists(indices).setIndicesOptions(options), false);
+ verify(typesExists(indices).setIndicesOptions(options), false);
+ verify(deleteByQuery(indices).setIndicesOptions(options), false);
+ verify(percolate(indices).setIndicesOptions(options), false);
+ verify(mpercolate(options, indices), false);
+ verify(suggest(indices).setIndicesOptions(options), false);
+ verify(getAliases(indices).setIndicesOptions(options), false);
+ verify(getFieldMapping(indices).setIndicesOptions(options), false);
+ verify(getMapping(indices).setIndicesOptions(options), false);
+ verify(getWarmer(indices).setIndicesOptions(options), false);
+ verify(getSettings(indices).setIndicesOptions(options), false);
+
+ assertAcked(prepareCreate("foobar"));
+ client().prepareIndex("foobar", "type", "1").setSource("k", "v").setRefresh(true).execute().actionGet();
+
+ // Verify defaults for wildcards, with one wildcard expression and one existing index
+ indices = new String[]{"foo*"};
+ verify(search(indices), false, 1);
+ verify(msearch(null, indices), false, 1);
+ verify(count(indices), false, 1);
+ verify(clearCache(indices), false);
+ verify(_flush(indices), false);
+ verify(gatewaySnapshot(indices), false);
+ verify(segments(indices), false);
+ verify(stats(indices), false);
+ verify(status(indices), false);
+ verify(optimize(indices), false);
+ verify(refresh(indices), false);
+ verify(validateQuery(indices), false);
+ verify(aliasExists(indices), false);
+ verify(typesExists(indices), false);
+ verify(deleteByQuery(indices), false);
+ verify(percolate(indices), false);
+ verify(mpercolate(null, indices), false);
+ verify(suggest(indices), false);
+ verify(getAliases(indices), false);
+ verify(getFieldMapping(indices), false);
+ verify(getMapping(indices), false);
+ verify(getWarmer(indices), false);
+ verify(getSettings(indices), false);
+
+ // Verify defaults for wildcards, with two wildcard expression and one existing index
+ indices = new String[]{"foo*", "bar*"};
+ verify(search(indices), false, 1);
+ verify(msearch(null, indices), false, 1);
+ verify(count(indices), false, 1);
+ verify(clearCache(indices), false);
+ verify(_flush(indices), false);
+ verify(gatewaySnapshot(indices), false);
+ verify(segments(indices), true);
+ verify(stats(indices), false);
+ verify(status(indices), false);
+ verify(optimize(indices), false);
+ verify(refresh(indices), false);
+ verify(validateQuery(indices), true);
+ verify(aliasExists(indices), false);
+ verify(typesExists(indices), false);
+ verify(deleteByQuery(indices), true);
+ verify(percolate(indices), false);
+ verify(mpercolate(null, indices), false);
+ verify(suggest(indices), false);
+ verify(getAliases(indices), false);
+ verify(getFieldMapping(indices), false);
+ verify(getMapping(indices), false);
+ verify(getWarmer(indices), false);
+ verify(getSettings(indices), false);
+
+ // Now force allow_no_indices=true
+ options = IndicesOptions.fromOptions(false, true, true, false);
+ verify(search(indices).setIndicesOptions(options), false, 1);
+ verify(msearch(options, indices).setIndicesOptions(options), false, 1);
+ verify(count(indices).setIndicesOptions(options), false, 1);
+ verify(clearCache(indices).setIndicesOptions(options), false);
+ verify(_flush(indices).setIndicesOptions(options), false);
+ verify(gatewaySnapshot(indices).setIndicesOptions(options), false);
+ verify(segments(indices).setIndicesOptions(options), false);
+ verify(stats(indices).setIndicesOptions(options), false);
+ verify(status(indices).setIndicesOptions(options), false);
+ verify(optimize(indices).setIndicesOptions(options), false);
+ verify(refresh(indices).setIndicesOptions(options), false);
+ verify(validateQuery(indices).setIndicesOptions(options), false);
+ verify(aliasExists(indices).setIndicesOptions(options), false);
+ verify(typesExists(indices).setIndicesOptions(options), false);
+ verify(deleteByQuery(indices).setIndicesOptions(options), false);
+ verify(percolate(indices).setIndicesOptions(options), false);
+ verify(mpercolate(options, indices), false);
+ verify(suggest(indices).setIndicesOptions(options), false);
+ verify(getAliases(indices).setIndicesOptions(options), false);
+ verify(getFieldMapping(indices).setIndicesOptions(options), false);
+ verify(getMapping(indices).setIndicesOptions(options), false);
+ verify(getWarmer(indices).setIndicesOptions(options), false);
+ verify(getSettings(indices).setIndicesOptions(options), false);
+ }
+
+ @Test
+ public void testWildcardBehaviour_snapshotRestore() throws Exception {
+ assertAcked(prepareCreate("foobar"));
+ ensureYellow();
+
+ PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("dummy-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+ client().admin().cluster().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get();
+
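+ // with allowNoIndices=false, a wildcard expression that matches nothing ("bar*" here) should make the request fail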
+ IndicesOptions options = IndicesOptions.fromOptions(false, false, true, false);
+ verify(snapshot("snap2", "foo*", "bar*").setIndicesOptions(options), true);
+ verify(restore("snap1", "foo*", "bar*").setIndicesOptions(options), true);
+
+ options = IndicesOptions.strict();
+ verify(snapshot("snap2", "foo*", "bar*").setIndicesOptions(options), false);
+ verify(restore("snap2", "foo*", "bar*").setIndicesOptions(options), false);
+
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+ options = IndicesOptions.fromOptions(false, false, true, false);
+ verify(snapshot("snap3", "foo*", "bar*").setIndicesOptions(options), false);
+ verify(restore("snap3", "foo*", "bar*").setIndicesOptions(options), false);
+
+ options = IndicesOptions.fromOptions(false, false, true, false);
+ verify(snapshot("snap4", "foo*", "baz*").setIndicesOptions(options), true);
+ verify(restore("snap3", "foo*", "baz*").setIndicesOptions(options), true);
+ }
+
+ @Test
+ public void testAllMissing_lenient() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test1"));
+ client().prepareIndex("test1", "type", "1").setSource("k", "v").setRefresh(true).execute().actionGet();
+ SearchResponse response = client().prepareSearch("test2")
+ .setIndicesOptions(IndicesOptions.lenient())
+ .setQuery(matchAllQuery())
+ .execute().actionGet();
+ assertHitCount(response, 0L);
+
+ response = client().prepareSearch("test2","test3").setQuery(matchAllQuery())
+ .setIndicesOptions(IndicesOptions.lenient())
+ .execute().actionGet();
+ assertHitCount(response, 0L);
+
+ //you should still be able to run empty searches without things blowing up
+ response = client().prepareSearch()
+ .setIndicesOptions(IndicesOptions.lenient())
+ .setQuery(matchAllQuery())
+ .execute().actionGet();
+ assertHitCount(response, 1L);
+ }
+
+ @Test
+ public void testAllMissing_strict() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test1"));
+ ensureYellow();
+ try {
+ client().prepareSearch("test2")
+ .setQuery(matchAllQuery())
+ .execute().actionGet();
+ fail("Exception should have been thrown.");
+ } catch (IndexMissingException e) {
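+ // expected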
+ }
+
+ try {
+ client().prepareSearch("test2","test3")
+ .setQuery(matchAllQuery())
+ .execute().actionGet();
+ fail("Exception should have been thrown.");
+ } catch (IndexMissingException e) {
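+ // expected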
+ }
+
+ //you should still be able to run empty searches without things blowing up
+ client().prepareSearch().setQuery(matchAllQuery()).execute().actionGet();
+ }
+
+ @Test
+ // For now, closed indices are not handled
+ public void testCloseApi_specifiedIndices() throws Exception {
+ assertAcked(prepareCreate("test1"));
+ assertAcked(prepareCreate("test2"));
+ ensureYellow();
+ verify(search("test1", "test2"), false);
+ verify(count("test1", "test2"), false);
+ assertAcked(client().admin().indices().prepareClose("test2").get());
+
+ try {
+ search("test1", "test2").get();
+ fail("Exception should have been thrown");
+ } catch (ClusterBlockException e) {
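+ // expected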
+ }
+ try {
+ count("test1", "test2").get();
+ fail("Exception should have been thrown");
+ } catch (ClusterBlockException e) {
+ }
+
+ verify(search(), false);
+ verify(count(), false);
+
+ verify(search("t*"), false);
+ verify(count("t*"), false);
+ }
+
+ @Test
+ public void testCloseApi_wildcards() throws Exception {
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareClose("bar*"), false);
+ verify(client().admin().indices().prepareClose("bar*"), true);
+
+ verify(client().admin().indices().prepareClose("foo*"), false);
+ verify(client().admin().indices().prepareClose("foo*"), true);
+ verify(client().admin().indices().prepareClose("_all"), true);
+
+ verify(client().admin().indices().prepareOpen("bar*"), false);
+ verify(client().admin().indices().prepareOpen("_all"), false);
+ verify(client().admin().indices().prepareOpen("_all"), true);
+ }
+
+ @Test
+ public void testDeleteIndex() throws Exception {
+ assertAcked(prepareCreate("foobar"));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDelete("foo"), true);
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(true));
+ verify(client().admin().indices().prepareDelete("foobar"), false);
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testDeleteIndex_wildcard() throws Exception {
+ verify(client().admin().indices().prepareDelete("_all"), false);
+
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDelete("foo*"), false);
+ assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("barbaz").get().isExists(), equalTo(true));
+
+ verify(client().admin().indices().prepareDelete("foo*"), false);
+
+ verify(client().admin().indices().prepareDelete("_all"), false);
+ assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("barbaz").get().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testDeleteMapping() throws Exception {
+ assertAcked(prepareCreate("foobar").addMapping("type1", "field", "type=string"));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDeleteMapping("foo").setType("type1"), true);
+ assertThat(client().admin().indices().prepareTypesExists("foobar").setTypes("type1").get().isExists(), equalTo(true));
+ verify(client().admin().indices().prepareDeleteMapping("foobar").setType("type1"), false);
+ assertThat(client().admin().indices().prepareTypesExists("foobar").setTypes("type1").get().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testDeleteMapping_wildcard() throws Exception {
+ verify(client().admin().indices().prepareDeleteMapping("_all").setType("type1"), true);
+
+ assertAcked(prepareCreate("foo").addMapping("type1", "field", "type=string"));
+ assertAcked(prepareCreate("foobar").addMapping("type1", "field", "type=string"));
+ assertAcked(prepareCreate("bar").addMapping("type1", "field", "type=string"));
+ assertAcked(prepareCreate("barbaz").addMapping("type1", "field", "type=string"));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDeleteMapping("foo*").setType("type1"), false);
+ assertThat(client().admin().indices().prepareTypesExists("foo").setTypes("type1").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareTypesExists("foobar").setTypes("type1").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareTypesExists("bar").setTypes("type1").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareTypesExists("barbaz").setTypes("type1").get().isExists(), equalTo(true));
+
+ assertAcked(client().admin().indices().prepareDelete("foo*"));
+
+ verify(client().admin().indices().prepareDeleteMapping("foo*").setType("type1"), true);
+
+ verify(client().admin().indices().prepareDeleteMapping("_all").setType("type1"), false);
+ assertThat(client().admin().indices().prepareTypesExists("bar").setTypes("type1").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareTypesExists("barbaz").setTypes("type1").get().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testPutWarmer() throws Exception {
+ assertAcked(prepareCreate("foobar"));
+ ensureYellow();
+ verify(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch().setIndices("foobar").setQuery(QueryBuilders.matchAllQuery())), false);
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
+ }
+
+ @Test
+ public void testPutWarmer_wildcard() throws Exception {
+
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+
+ verify(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch().setIndices("foo*").setQuery(QueryBuilders.matchAllQuery())), false);
+
+ assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("warmer1").get().getWarmers().size(), equalTo(0));
+ assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("warmer1").get().getWarmers().size(), equalTo(0));
+
+ verify(client().admin().indices().preparePutWarmer("warmer2").setSearchRequest(client().prepareSearch().setIndices().setQuery(QueryBuilders.matchAllQuery())), false);
+
+ assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
+ }
+
+ @Test
+ public void testPutAlias() throws Exception {
+ assertAcked(prepareCreate("foobar"));
+ ensureYellow();
+ verify(client().admin().indices().prepareAliases().addAlias("foobar", "foobar_alias"), false);
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foobar").get().exists(), equalTo(true));
+ }
+
+ @Test
+ public void testPutAlias_wildcard() throws Exception {
+
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareAliases().addAlias("foo*", "foobar_alias"), false);
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foo").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foobar").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("bar").get().exists(), equalTo(false));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("barbaz").get().exists(), equalTo(false));
+
+ verify(client().admin().indices().prepareAliases().addAlias("*", "foobar_alias"), false);
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foo").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foobar").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("bar").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("barbaz").get().exists(), equalTo(true));
+ }
+
+ @Test
+ public void testDeleteMapping_typeWildcard() throws Exception {
+ verify(client().admin().indices().prepareDeleteMapping("_all").setType("type1"), true);
+
+ assertAcked(prepareCreate("foo").addMapping("type1", "field", "type=string"));
+ assertAcked(prepareCreate("foobar").addMapping("type2", "field", "type=string"));
+ assertAcked(prepareCreate("bar").addMapping("type3", "field", "type=string"));
+ assertAcked(prepareCreate("barbaz").addMapping("type4", "field", "type=string"));
+
+ ensureYellow();
+
+ assertThat(client().admin().indices().prepareTypesExists("foo").setTypes("type1").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareTypesExists("foobar").setTypes("type2").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareTypesExists("bar").setTypes("type3").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareTypesExists("barbaz").setTypes("type4").get().isExists(), equalTo(true));
+
+ verify(client().admin().indices().prepareDeleteMapping("foo*").setType("type*"), false);
+ assertThat(client().admin().indices().prepareTypesExists("foo").setTypes("type1").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareTypesExists("foobar").setTypes("type2").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareTypesExists("bar").setTypes("type3").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareTypesExists("barbaz").setTypes("type4").get().isExists(), equalTo(true));
+
+ assertAcked(client().admin().indices().prepareDelete("foo*"));
+
+ verify(client().admin().indices().prepareDeleteMapping("foo*").setType("type1"), true);
+
+ verify(client().admin().indices().prepareDeleteMapping("_all").setType("type3","type4"), false);
+ assertThat(client().admin().indices().prepareTypesExists("bar").setTypes("type3").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareTypesExists("barbaz").setTypes("type4").get().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testDeleteWarmer() throws Exception {
+ IndexWarmersMetaData.Entry entry = new IndexWarmersMetaData.Entry(
+ "test1", new String[]{"typ1"}, new BytesArray("{\"query\" : { \"match_all\" : {}}}")
+ );
+ assertAcked(prepareCreate("foobar").addCustom(new IndexWarmersMetaData(entry)));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo").setNames("test1"), true);
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(1));
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("foobar").setNames("test1"), false);
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ }
+
+ @Test
+ public void testDeleteWarmer_wildcard() throws Exception {
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("_all").setNames("test1"), true);
+
+ IndexWarmersMetaData.Entry entry = new IndexWarmersMetaData.Entry(
+ "test1", new String[]{"type1"}, new BytesArray("{\"query\" : { \"match_all\" : {}}}")
+ );
+ assertAcked(prepareCreate("foo").addCustom(new IndexWarmersMetaData(entry)));
+ assertAcked(prepareCreate("foobar").addCustom(new IndexWarmersMetaData(entry)));
+ assertAcked(prepareCreate("bar").addCustom(new IndexWarmersMetaData(entry)));
+ assertAcked(prepareCreate("barbaz").addCustom(new IndexWarmersMetaData(entry)));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo*").setNames("test1"), false);
+ assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("test1").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(1));
+
+ assertAcked(client().admin().indices().prepareDelete("foo*"));
+
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo*").setNames("test1"), true);
+
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("_all").setNames("test1"), false);
+ assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ }
+
+ @Test
+ // The indices exists API never throws IndexMissingException; the indices options control whether it returns true or false
+ public void testIndicesExists() throws Exception {
+ assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("foo").setIndicesOptions(IndicesOptions.lenient()).get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("foo*").setIndicesOptions(IndicesOptions.fromOptions(false, true, true, false)).get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false));
+
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+
+ assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("bar*").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(true));
+ }
+
+ @Test
+ public void testPutMapping() throws Exception {
+ verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=string"), true);
+ verify(client().admin().indices().preparePutMapping("_all").setType("type1").setSource("field", "type=string"), true);
+
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+
+ verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type1"), notNullValue());
+ verify(client().admin().indices().preparePutMapping("b*").setType("type1").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type1"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type1"), notNullValue());
+ verify(client().admin().indices().preparePutMapping("_all").setType("type2").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type2"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("foobar").get().mappings().get("foobar").get("type2"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type2"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type2"), notNullValue());
+ verify(client().admin().indices().preparePutMapping().setType("type3").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type3"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("foobar").get().mappings().get("foobar").get("type3"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type3"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type3"), notNullValue());
+
+ verify(client().admin().indices().preparePutMapping("c*").setType("type1").setSource("field", "type=string"), true);
+
+ assertAcked(client().admin().indices().prepareClose("barbaz").get());
+ verify(client().admin().indices().preparePutMapping("barbaz").setType("type4").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type4"), notNullValue());
+ }
+
+ @Test
+ public void testUpdateSettings() throws Exception {
+ verify(client().admin().indices().prepareUpdateSettings("foo").setSettings(ImmutableSettings.builder().put("a", "b")), true);
+ verify(client().admin().indices().prepareUpdateSettings("_all").setSettings(ImmutableSettings.builder().put("a", "b")), true);
+
+ assertAcked(prepareCreate("foo"));
+ assertAcked(prepareCreate("foobar"));
+ assertAcked(prepareCreate("bar"));
+ assertAcked(prepareCreate("barbaz"));
+ ensureYellow();
+ assertAcked(client().admin().indices().prepareClose("_all").get());
+
+ verify(client().admin().indices().prepareUpdateSettings("foo").setSettings(ImmutableSettings.builder().put("a", "b")), false);
+ verify(client().admin().indices().prepareUpdateSettings("bar*").setSettings(ImmutableSettings.builder().put("a", "b")), false);
+ verify(client().admin().indices().prepareUpdateSettings("_all").setSettings(ImmutableSettings.builder().put("c", "d")), false);
+
+ GetSettingsResponse settingsResponse = client().admin().indices().prepareGetSettings("foo").get();
+ assertThat(settingsResponse.getSetting("foo", "index.a"), equalTo("b"));
+ settingsResponse = client().admin().indices().prepareGetSettings("bar*").get();
+ assertThat(settingsResponse.getSetting("bar", "index.a"), equalTo("b"));
+ assertThat(settingsResponse.getSetting("barbaz", "index.a"), equalTo("b"));
+ settingsResponse = client().admin().indices().prepareGetSettings("_all").get();
+ assertThat(settingsResponse.getSetting("foo", "index.c"), equalTo("d"));
+ assertThat(settingsResponse.getSetting("foobar", "index.c"), equalTo("d"));
+ assertThat(settingsResponse.getSetting("bar", "index.c"), equalTo("d"));
+ assertThat(settingsResponse.getSetting("barbaz", "index.c"), equalTo("d"));
+
+ assertAcked(client().admin().indices().prepareOpen("_all").get());
+ try {
+ verify(client().admin().indices().prepareUpdateSettings("barbaz").setSettings(ImmutableSettings.builder().put("e", "f")), false);
+ } catch (ElasticsearchIllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("Can't update non dynamic settings[[index.e]] for open indices[[barbaz]]"));
+ }
+ verify(client().admin().indices().prepareUpdateSettings("baz*").setSettings(ImmutableSettings.builder().put("a", "b")), true);
+ }
+
+ private static SearchRequestBuilder search(String... indices) {
+ return client().prepareSearch(indices).setQuery(matchAllQuery());
+ }
+
+ private static MultiSearchRequestBuilder msearch(IndicesOptions options, String... indices) {
+ MultiSearchRequestBuilder multiSearchRequestBuilder = client().prepareMultiSearch();
+ if (options != null) {
+ multiSearchRequestBuilder.setIndicesOptions(options);
+ }
+ return multiSearchRequestBuilder.add(client().prepareSearch(indices).setQuery(matchAllQuery()));
+ }
+
+ private static CountRequestBuilder count(String... indices) {
+ return client().prepareCount(indices).setQuery(matchAllQuery());
+ }
+
+ private static ClearIndicesCacheRequestBuilder clearCache(String... indices) {
+ return client().admin().indices().prepareClearCache(indices);
+ }
+
+ private static FlushRequestBuilder _flush(String... indices) {
+ return client().admin().indices().prepareFlush(indices);
+ }
+
+ private static GatewaySnapshotRequestBuilder gatewaySnapshot(String... indices) {
+ return client().admin().indices().prepareGatewaySnapshot(indices);
+ }
+
+ private static IndicesSegmentsRequestBuilder segments(String... indices) {
+ return client().admin().indices().prepareSegments(indices);
+ }
+
+ private static IndicesStatsRequestBuilder stats(String... indices) {
+ return client().admin().indices().prepareStats(indices);
+ }
+
+ private static IndicesStatusRequestBuilder status(String... indices) {
+ return client().admin().indices().prepareStatus(indices);
+ }
+
+ private static OptimizeRequestBuilder optimize(String... indices) {
+ return client().admin().indices().prepareOptimize(indices);
+ }
+
+ private static RefreshRequestBuilder refresh(String... indices) {
+ return client().admin().indices().prepareRefresh(indices);
+ }
+
+ private static ValidateQueryRequestBuilder validateQuery(String... indices) {
+ return client().admin().indices().prepareValidateQuery(indices);
+ }
+
+ private static AliasesExistRequestBuilder aliasExists(String... indices) {
+ return client().admin().indices().prepareAliasesExist("dummy").addIndices(indices);
+ }
+
+ private static TypesExistsRequestBuilder typesExists(String... indices) {
+ return client().admin().indices().prepareTypesExists(indices).setTypes("dummy");
+ }
+
+ private static DeleteByQueryRequestBuilder deleteByQuery(String... indices) {
+ return client().prepareDeleteByQuery(indices).setQuery(boolQuery().mustNot(matchAllQuery()));
+ }
+
+ private static PercolateRequestBuilder percolate(String... indices) {
+ return client().preparePercolate().setIndices(indices)
+ .setSource(new PercolateSourceBuilder().setDoc(docBuilder().setDoc("k", "v")))
+ .setDocumentType("type");
+ }
+
+ private static MultiPercolateRequestBuilder mpercolate(IndicesOptions options, String... indices) {
+ MultiPercolateRequestBuilder builder = client().prepareMultiPercolate();
+ if (options != null) {
+ builder.setIndicesOptions(options);
+ }
+ return builder.add(percolate(indices));
+ }
+
+ private static SuggestRequestBuilder suggest(String... indices) {
+ return client().prepareSuggest(indices).addSuggestion(SuggestBuilder.termSuggestion("name").field("a"));
+ }
+
+ private static GetAliasesRequestBuilder getAliases(String... indices) {
+ return client().admin().indices().prepareGetAliases("dummy").addIndices(indices);
+ }
+
+ private static GetFieldMappingsRequestBuilder getFieldMapping(String... indices) {
+ return client().admin().indices().prepareGetFieldMappings(indices);
+ }
+
+ private static GetMappingsRequestBuilder getMapping(String... indices) {
+ return client().admin().indices().prepareGetMappings(indices);
+ }
+
+ private static GetWarmersRequestBuilder getWarmer(String... indices) {
+ return client().admin().indices().prepareGetWarmers(indices);
+ }
+
+ private static GetSettingsRequestBuilder getSettings(String... indices) {
+ return client().admin().indices().prepareGetSettings(indices);
+ }
+
+ private static CreateSnapshotRequestBuilder snapshot(String name, String... indices) {
+ return client().admin().cluster().prepareCreateSnapshot("dummy-repo", name).setWaitForCompletion(true).setIndices(indices);
+ }
+
+ private static RestoreSnapshotRequestBuilder restore(String name, String... indices) {
+ return client().admin().cluster().prepareRestoreSnapshot("dummy-repo", name)
+ .setRenamePattern("(.+)").setRenameReplacement("$1-copy-" + name)
+ .setWaitForCompletion(true)
+ .setIndices(indices);
+ }
+
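+ /**
+ * Executes the request and checks the outcome: when {@code fail} is true an
+ * IndexMissingException is expected (a multi search surfaces the failure as a null
+ * item response instead); otherwise the request must succeed, and search and count
+ * responses are additionally checked against the expected hit count.
+ */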
+ private static void verify(ActionRequestBuilder requestBuilder, boolean fail) {
+ verify(requestBuilder, fail, 0);
+ }
+
+ private static void verify(ActionRequestBuilder requestBuilder, boolean fail, long expectedCount) {
+ if (fail) {
+ if (requestBuilder instanceof MultiSearchRequestBuilder) {
+ MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get();
+ assertThat(multiSearchResponse.getResponses().length, equalTo(1));
+ assertThat(multiSearchResponse.getResponses()[0].getResponse(), nullValue());
+ } else {
+ try {
+ requestBuilder.get();
+ fail("IndexMissingException was expected");
+ } catch (IndexMissingException e) {}
+ }
+ } else {
+ if (requestBuilder instanceof SearchRequestBuilder) {
+ SearchRequestBuilder searchRequestBuilder = (SearchRequestBuilder) requestBuilder;
+ assertHitCount(searchRequestBuilder.get(), expectedCount);
+ } else if (requestBuilder instanceof CountRequestBuilder) {
+ CountRequestBuilder countRequestBuilder = (CountRequestBuilder) requestBuilder;
+ assertHitCount(countRequestBuilder.get(), expectedCount);
+ } else if (requestBuilder instanceof MultiSearchRequestBuilder) {
+ MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get();
+ assertThat(multiSearchResponse.getResponses().length, equalTo(1));
+ assertThat(multiSearchResponse.getResponses()[0].getResponse(), notNullValue());
+ } else {
+ requestBuilder.get();
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisBinderProcessor.java b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisBinderProcessor.java
new file mode 100644
index 0000000..fdb5ab0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisBinderProcessor.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.index.analysis.AnalysisModule;
+
+/**
+ * Registers the dummy analyzer, token filter, tokenizer and char filter under
+ * fixed names so tests can reference them from analysis settings.
+ */
+public class DummyAnalysisBinderProcessor extends AnalysisModule.AnalysisBinderProcessor {
+
+ @Override
+ public void processAnalyzers(AnalyzersBindings analyzersBindings) {
+ analyzersBindings.processAnalyzer("dummy", DummyAnalyzerProvider.class);
+ }
+
+ @Override
+ public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {
+ tokenFiltersBindings.processTokenFilter("dummy_token_filter", DummyTokenFilterFactory.class);
+ }
+
+ @Override
+ public void processTokenizers(TokenizersBindings tokenizersBindings) {
+ tokenizersBindings.processTokenizer("dummy_tokenizer", DummyTokenizerFactory.class);
+ }
+
+ @Override
+ public void processCharFilters(CharFiltersBindings charFiltersBindings) {
+ charFiltersBindings.processCharFilter("dummy_char_filter", DummyCharFilterFactory.class);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisPlugin.java b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisPlugin.java
new file mode 100644
index 0000000..55d22eb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisPlugin.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.plugins.AbstractPlugin;
+
+import java.util.Collection;
+
+public class DummyAnalysisPlugin extends AbstractPlugin {
+ /**
+ * The name of the plugin.
+ */
+ @Override
+ public String name() {
+ return "analysis-dummy";
+ }
+
+ /**
+ * The description of the plugin.
+ */
+ @Override
+ public String description() {
+ return "Analysis Dummy Plugin";
+ }
+
+ @Override
+ public Collection<Class<? extends Module>> modules() {
+ return ImmutableList.<Class<? extends Module>>of(DummyIndicesAnalysisModule.class);
+ }
+
+ public void onModule(AnalysisModule module) {
+ module.addProcessor(new DummyAnalysisBinderProcessor());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java
new file mode 100644
index 0000000..d413096
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
+import org.apache.lucene.util.Version;
+
+import java.io.Reader;
+
+public class DummyAnalyzer extends StopwordAnalyzerBase {
+
+ protected DummyAnalyzer(Version version) {
+ super(version);
+ }
+
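+ // never invoked by these tests; the dummy analyzer only needs to be registrable, not to tokenize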
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ return null;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzerProvider.java b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzerProvider.java
new file mode 100644
index 0000000..0c4b48b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzerProvider.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.analysis.AnalyzerProvider;
+import org.elasticsearch.index.analysis.AnalyzerScope;
+
+public class DummyAnalyzerProvider implements AnalyzerProvider<DummyAnalyzer> {
+ @Override
+ public String name() {
+ return "dummy";
+ }
+
+ @Override
+ public AnalyzerScope scope() {
+ return AnalyzerScope.INDICES;
+ }
+
+ @Override
+ public DummyAnalyzer get() {
+ return new DummyAnalyzer(Lucene.ANALYZER_VERSION);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyCharFilterFactory.java b/src/test/java/org/elasticsearch/indices/analysis/DummyCharFilterFactory.java
new file mode 100644
index 0000000..8c5896e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyCharFilterFactory.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.index.analysis.CharFilterFactory;
+
+import java.io.Reader;
+
+public class DummyCharFilterFactory implements CharFilterFactory {
+ @Override
+ public String name() {
+ return "dummy_char_filter";
+ }
+
+ @Override
+ public Reader create(Reader reader) {
+ return null;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysis.java b/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysis.java
new file mode 100644
index 0000000..c48edb1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysis.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.analysis.*;
+
+public class DummyIndicesAnalysis extends AbstractComponent {
+
+ @Inject
+ public DummyIndicesAnalysis(Settings settings, IndicesAnalysisService indicesAnalysisService) {
+ super(settings);
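+ // register one pre-built component of each kind under a fixed "dummy" name,
+ // mirroring what a real analysis plugin would contribute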
+ indicesAnalysisService.analyzerProviderFactories().put("dummy",
+ new PreBuiltAnalyzerProviderFactory("dummy", AnalyzerScope.INDICES,
+ new DummyAnalyzer(Lucene.ANALYZER_VERSION)));
+ indicesAnalysisService.tokenFilterFactories().put("dummy_token_filter",
+ new PreBuiltTokenFilterFactoryFactory(new DummyTokenFilterFactory()));
+ indicesAnalysisService.charFilterFactories().put("dummy_char_filter",
+ new PreBuiltCharFilterFactoryFactory(new DummyCharFilterFactory()));
+ indicesAnalysisService.tokenizerFactories().put("dummy_tokenizer",
+ new PreBuiltTokenizerFactoryFactory(new DummyTokenizerFactory()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysisModule.java b/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysisModule.java
new file mode 100644
index 0000000..9d14f67
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysisModule.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+public class DummyIndicesAnalysisModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(DummyIndicesAnalysis.class).asEagerSingleton();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyTokenFilterFactory.java b/src/test/java/org/elasticsearch/indices/analysis/DummyTokenFilterFactory.java
new file mode 100644
index 0000000..489e4dc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyTokenFilterFactory.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
+
+public class DummyTokenFilterFactory implements TokenFilterFactory {
+ @Override
+ public String name() {
+ return "dummy_token_filter";
+ }
+
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return null;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/DummyTokenizerFactory.java b/src/test/java/org/elasticsearch/indices/analysis/DummyTokenizerFactory.java
new file mode 100644
index 0000000..95c6a5e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/DummyTokenizerFactory.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.elasticsearch.index.analysis.TokenizerFactory;
+
+import java.io.Reader;
+
+public class DummyTokenizerFactory implements TokenizerFactory {
+ @Override
+ public String name() {
+ return "dummy_tokenizer";
+ }
+
+ @Override
+ public Tokenizer create(Reader reader) {
+ return null;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationTests.java b/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationTests.java
new file mode 100644
index 0000000..af5df68
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationTests.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.lucene.analysis.Analyzer;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.lang.reflect.Field;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+public class PreBuiltAnalyzerIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.settingsBuilder()
+ .put("plugin.types", DummyAnalysisPlugin.class.getName())
+ .put(super.nodeSettings(nodeOrdinal))
+ .build();
+ }
+
+ @Test
+ public void testThatPreBuiltAnalyzersAreNotClosedOnIndexClose() throws Exception {
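+ // track every (analyzer, version) pair configured below so it can be asserted on
+ // after some of the indices have been closed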
+ Map<PreBuiltAnalyzers, List<Version>> loadedAnalyzers = Maps.newHashMap();
+
+ List<String> indexNames = Lists.newArrayList();
+ for (int i = 0; i < 10; i++) {
+ String indexName = randomAsciiOfLength(10).toLowerCase(Locale.ROOT);
+ indexNames.add(indexName);
+
+ int randomInt = randomInt(PreBuiltAnalyzers.values().length-1);
+ PreBuiltAnalyzers preBuiltAnalyzer = PreBuiltAnalyzers.values()[randomInt];
+ String name = preBuiltAnalyzer.name().toLowerCase(Locale.ROOT);
+
+ Version randomVersion = randomVersion();
+ if (!loadedAnalyzers.containsKey(preBuiltAnalyzer)) {
+ loadedAnalyzers.put(preBuiltAnalyzer, Lists.<Version>newArrayList());
+ }
+ loadedAnalyzers.get(preBuiltAnalyzer).add(randomVersion);
+
+ final XContentBuilder mapping = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", name)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
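+ // creating the index with an explicit (random) version forces the version-specific
+ // instance of the chosen pre-built analyzer to be loaded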
+ Settings versionSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion).build();
+ client().admin().indices().prepareCreate(indexName).addMapping("type", mapping).setSettings(versionSettings).get();
+ }
+
+ ensureGreen();
+
+ // index some amount of data
+ for (int i = 0; i < 100; i++) {
+ String randomIndex = indexNames.get(randomInt(indexNames.size()-1));
+ String randomId = randomInt() + "";
+
+ Map<String, Object> data = Maps.newHashMap();
+ data.put("foo", randomAsciiOfLength(50));
+
+ index(randomIndex, "type", randomId, data);
+ }
+
+ refresh();
+
+ // close some of the indices
+ int amountOfIndicesToClose = randomInt(indexNames.size() - 1);
+ for (int i = 0; i < amountOfIndicesToClose; i++) {
+ String indexName = indexNames.get(i);
+ client().admin().indices().prepareClose(indexName).execute().actionGet();
+ }
+
+ ensureGreen();
+
+ // check that all analyzers configured above have been loaded
+ assertThatAnalyzersHaveBeenLoaded(loadedAnalyzers);
+
+ // check that all of the pre-built analyzers are still open
+ assertLuceneAnalyzersAreNotClosed(loadedAnalyzers);
+ }
+
+ /**
+ * Test case for #5030: Upgrading analysis plugins fails
+ * See https://github.com/elasticsearch/elasticsearch/issues/5030
+ */
+ @Test
+ public void testThatPluginAnalyzersCanBeUpdated() throws Exception {
+ final XContentBuilder mapping = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", "dummy")
+ .endObject()
+ .startObject("bar")
+ .field("type", "string")
+ .field("analyzer", "my_dummy")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ Settings versionSettings = ImmutableSettings.builder()
+ .put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion())
+ .put("index.analysis.analyzer.my_dummy.type", "custom")
+ .put("index.analysis.analyzer.my_dummy.filter", "my_dummy_token_filter")
+ .put("index.analysis.analyzer.my_dummy.char_filter", "my_dummy_char_filter")
+ .put("index.analysis.analyzer.my_dummy.tokenizer", "my_dummy_tokenizer")
+ .put("index.analysis.tokenizer.my_dummy_tokenizer.type", "dummy_tokenizer")
+ .put("index.analysis.filter.my_dummy_token_filter.type", "dummy_token_filter")
+ .put("index.analysis.char_filter.my_dummy_char_filter.type", "dummy_char_filter")
+ .build();
+
+ client().admin().indices().prepareCreate("test-analysis-dummy").addMapping("type", mapping).setSettings(versionSettings).get();
+
+ ensureGreen();
+ }
+
+ private void assertThatAnalyzersHaveBeenLoaded(Map<PreBuiltAnalyzers, List<Version>> expectedLoadedAnalyzers) {
+ for (Map.Entry<PreBuiltAnalyzers, List<Version>> entry : expectedLoadedAnalyzers.entrySet()) {
+ for (Version version : entry.getValue()) {
+ // if it is not null in the cache, it has been loaded
+ assertThat(entry.getKey().getCache().get(version), is(notNullValue()));
+ }
+ }
+ }
+
+ // the close() method of a lucene analyzer sets the storedValue field to null
+ // we simply check this via reflection - ugly but works
+ private void assertLuceneAnalyzersAreNotClosed(Map<PreBuiltAnalyzers, List<Version>> loadedAnalyzers) throws IllegalAccessException, NoSuchFieldException {
+ for (Map.Entry<PreBuiltAnalyzers, List<Version>> preBuiltAnalyzerEntry : loadedAnalyzers.entrySet()) {
+ PreBuiltAnalyzers preBuiltAnalyzer = preBuiltAnalyzerEntry.getKey();
+ for (Version version : preBuiltAnalyzerEntry.getValue()) {
+ Analyzer analyzer = preBuiltAnalyzerEntry.getKey().getCache().get(version);
+
+ Field field = getFieldFromClass("storedValue", analyzer);
+ boolean currentAccessible = field.isAccessible();
+ field.setAccessible(true);
+ Object storedValue = field.get(analyzer);
+ field.setAccessible(currentAccessible);
+
+ assertThat(String.format(Locale.ROOT, "Analyzer %s in version %s seems to be closed", preBuiltAnalyzer.name(), version), storedValue, is(notNullValue()));
+ }
+ }
+ }
+
+ /**
+ * Searches for the named field, walking up through all superclasses until it is found.
+ */
+ private Field getFieldFromClass(String fieldName, Object obj) {
+ Field field = null;
+ boolean fieldFound = false;
+ Class<?> clazz = obj.getClass();
+ while (!fieldFound) {
+ try {
+ field = clazz.getDeclaredField(fieldName);
+ fieldFound = true;
+ } catch (NoSuchFieldException e) {
+ if (Object.class.equals(clazz)) {
+ throw new RuntimeException("Could not find field " + fieldName + " in class " + obj.getClass());
+ }
+ clazz = clazz.getSuperclass();
+ }
+ }
+
+ return field;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java b/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java
new file mode 100644
index 0000000..f33ce30
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.analyze;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class AnalyzeActionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleAnalyzerTests() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze("test", "this is a test").execute().actionGet();
+ assertThat(analyzeResponse.getTokens().size(), equalTo(4));
+ AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(0);
+ assertThat(token.getTerm(), equalTo("this"));
+ assertThat(token.getStartOffset(), equalTo(0));
+ assertThat(token.getEndOffset(), equalTo(4));
+ token = analyzeResponse.getTokens().get(1);
+ assertThat(token.getTerm(), equalTo("is"));
+ assertThat(token.getStartOffset(), equalTo(5));
+ assertThat(token.getEndOffset(), equalTo(7));
+ token = analyzeResponse.getTokens().get(2);
+ assertThat(token.getTerm(), equalTo("a"));
+ assertThat(token.getStartOffset(), equalTo(8));
+ assertThat(token.getEndOffset(), equalTo(9));
+ token = analyzeResponse.getTokens().get(3);
+ assertThat(token.getTerm(), equalTo("test"));
+ assertThat(token.getStartOffset(), equalTo(10));
+ assertThat(token.getEndOffset(), equalTo(14));
+ }
+ }
+
+ @Test
+ public void analyzeNumericField() throws ElasticsearchException, IOException {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ createIndex("test");
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ client().prepareIndex("test", "test", "1")
+ .setSource(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("long", 1l)
+ .field("double", 1.0d)
+ .endObject())
+ .setRefresh(true).execute().actionGet();
+
+ try {
+ client().admin().indices().prepareAnalyze("test", "123").setField("long").execute().actionGet();
+ fail("Analyzing a numeric field should have thrown an ElasticsearchIllegalArgumentException");
+ } catch (ElasticsearchIllegalArgumentException ex) {
+ // expected
+ }
+ try {
+ client().admin().indices().prepareAnalyze("test", "123.0").setField("double").execute().actionGet();
+ fail("Analyzing a numeric field should have thrown an ElasticsearchIllegalArgumentException");
+ } catch (ElasticsearchIllegalArgumentException ex) {
+ // expected
+ }
+ }
+
+ @Test
+ public void analyzeWithNoIndex() throws Exception {
+
+ AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST").setAnalyzer("simple").execute().actionGet();
+ assertThat(analyzeResponse.getTokens().size(), equalTo(4));
+
+ analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST").setTokenizer("keyword").setTokenFilters("lowercase").execute().actionGet();
+ assertThat(analyzeResponse.getTokens().size(), equalTo(1));
+ assertThat(analyzeResponse.getTokens().get(0).getTerm(), equalTo("this is a test"));
+ }
+
+ @Test
+ public void analyzerWithFieldOrTypeTests() throws Exception {
+
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
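+ // map a field to the "simple" analyzer so the analyze request can resolve analysis through a field name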
+ client().admin().indices().preparePutMapping("test")
+ .setType("document").setSource(
+ "{\n" +
+ " \"document\":{\n" +
+ " \"properties\":{\n" +
+ " \"simple\":{\n" +
+ " \"type\":\"string\",\n" +
+ " \"analyzer\": \"simple\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}"
+ ).execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ final AnalyzeRequestBuilder requestBuilder = client().admin().indices().prepareAnalyze("test", "THIS IS A TEST");
+ requestBuilder.setField("document.simple");
+ AnalyzeResponse analyzeResponse = requestBuilder.execute().actionGet();
+ assertThat(analyzeResponse.getTokens().size(), equalTo(4));
+ AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(3);
+ assertThat(token.getTerm(), equalTo("test"));
+ assertThat(token.getStartOffset(), equalTo(10));
+ assertThat(token.getEndOffset(), equalTo(14));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java b/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java
new file mode 100644
index 0000000..e3a1587
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.analyze;
+
+import org.apache.lucene.analysis.hunspell.HunspellDictionary;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.analysis.HunspellService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class HunspellServiceTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testLocaleDirectoryWithNodeLevelConfig() throws Exception {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("indices.analysis.hunspell.dictionary.lazy", true)
+ .put("indices.analysis.hunspell.dictionary.ignore_case", true)
+ .build();
+
+ cluster().startNode(settings);
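+ // the dictionaries are configured lazy above, so this lookup is what actually loads en_US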
+ HunspellDictionary dictionary = cluster().getInstance(HunspellService.class).getDictionary("en_US");
+ assertThat(dictionary, notNullValue());
+ Version expectedVersion = Lucene.parseVersion(settings.get("indices.analysis.hunspell.version"), Lucene.ANALYZER_VERSION, logger);
+ assertThat(dictionary.getVersion(), equalTo(expectedVersion));
+ assertThat(dictionary.isIgnoreCase(), equalTo(true));
+ }
+
+ @Test
+ public void testLocaleDirectoryWithLocaleSpecificConfig() throws Exception {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("path.conf", getResource("/indices/analyze/conf_dir"))
+ .put("indices.analysis.hunspell.dictionary.lazy", true)
+ .put("indices.analysis.hunspell.dictionary.ignore_case", true)
+ .put("indices.analysis.hunspell.dictionary.en_US.strict_affix_parsing", false)
+ .put("indices.analysis.hunspell.dictionary.en_US.ignore_case", false)
+ .build();
+
+ cluster().startNode(settings);
+ HunspellDictionary dictionary = cluster().getInstance(HunspellService.class).getDictionary("en_US");
+ assertThat(dictionary, notNullValue());
+ Version expectedVersion = Lucene.parseVersion(settings.get("indices.analysis.hunspell.version"), Lucene.ANALYZER_VERSION, logger);
+ assertThat(dictionary.getVersion(), equalTo(expectedVersion));
+ assertThat(dictionary.isIgnoreCase(), equalTo(false));
+
+
+ // testing that dictionary-specific settings override node-level settings
+ dictionary = cluster().getInstance(HunspellService.class).getDictionary("en_US_custom");
+ assertThat(dictionary, notNullValue());
+ assertThat(dictionary.getVersion(), equalTo(expectedVersion));
+ assertThat(dictionary.isIgnoreCase(), equalTo(true));
+ }
+
+ @Test
+ public void testCustomizeLocaleDirectory() throws Exception {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("indices.analysis.hunspell.dictionary.location", getResource("/indices/analyze/conf_dir/hunspell"))
+ .build();
+
+ cluster().startNode(settings);
+ HunspellDictionary dictionary = cluster().getInstance(HunspellService.class).getDictionary("en_US");
+ assertThat(dictionary, notNullValue());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/cache/CacheTests.java b/src/test/java/org/elasticsearch/indices/cache/CacheTests.java
new file mode 100644
index 0000000..7441a32
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/cache/CacheTests.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.cache;
+
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+@ClusterScope(scope=Scope.SUITE, numNodes=1)
+public class CacheTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ // The filter cache is cleaned periodically (every 60s by default); use a tiny interval so tests don't have to sleep for a minute.
+ return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("indices.cache.filter.clean_interval", "1ms").build();
+ }
+
+ @Test
+ public void testClearCacheFilterKeys() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ client().prepareIndex("test", "type", "1").setSource("field", "value").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
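+ // run a filtered search with an explicit cache key so that exactly this entry can be cleared later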
+ SearchResponse searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), FilterBuilders.termFilter("field", "value").cacheKey("test_key"))).execute().actionGet();
+ assertThat(searchResponse.getHits().getHits().length, equalTo(1));
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));
+ indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));
+
+ client().admin().indices().prepareClearCache().setFilterKeys("test_key").execute().actionGet();
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+ indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+ }
+
+ @Test
+ public void testFieldDataStats() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "field2", "value1").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "field2", "value2").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+
+ // sort so the field gets loaded into field data...
+ client().prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet();
+ client().prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet();
+
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+
+ // sort so the field gets loaded into field data...
+ client().prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet();
+ client().prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet();
+
+ // now check the per field stats
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.FieldData, true).fieldDataFields("*")).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getFields().get("field"), greaterThan(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getFields().get("field"), lessThan(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes()));
+
+ indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).setFieldDataFields("*").execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getFieldData().getFields().get("field"), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getFieldData().getFields().get("field"), lessThan(indicesStats.getTotal().getFieldData().getMemorySizeInBytes()));
+
+ client().admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+
+ }
+
+ @Test
+ public void testClearAllCaches() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_replicas", 0)
+ .put("index.number_of_shards", 1))
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ client().prepareIndex("test", "type", "1").setSource("field", "value1").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true)
+ .execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test")
+ .clear().setFieldData(true).setFilterCache(true)
+ .execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+ // sort to load field data and filter to populate the filter cache
+ client().prepareSearch()
+ .setPostFilter(FilterBuilders.termFilter("field", "value1"))
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+ client().prepareSearch()
+ .setPostFilter(FilterBuilders.termFilter("field", "value2"))
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true)
+ .execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));
+
+ indicesStats = client().admin().indices().prepareStats("test")
+ .clear().setFieldData(true).setFilterCache(true)
+ .execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));
+
+ client().admin().indices().prepareClearCache().execute().actionGet();
+ Thread.sleep(100); // Make sure the filter cache entries have been removed...
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true)
+ .execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+ indicesStats = client().admin().indices().prepareStats("test")
+ .clear().setFieldData(true).setFilterCache(true)
+ .execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java b/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java
new file mode 100644
index 0000000..778723b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.exists.types;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class TypesExistsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimple() throws Exception {
+ Client client = client();
+ client.admin().indices().prepareCreate("test1")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject())
+ .addMapping("type2", jsonBuilder().startObject().startObject("type2").endObject().endObject())
+ .execute().actionGet();
+ client.admin().indices().prepareCreate("test2")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject())
+ .execute().actionGet();
+ client.admin().indices().prepareAliases().addAlias("test1", "alias1").execute().actionGet();
+ ClusterHealthResponse healthResponse = client.admin().cluster()
+ .prepareHealth("test1", "test2").setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ TypesExistsResponse response = client.admin().indices().prepareTypesExists("test1").setTypes("type1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ response = client.admin().indices().prepareTypesExists("test1").setTypes("type2").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ response = client.admin().indices().prepareTypesExists("test1").setTypes("type3").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+ try {
+ client.admin().indices().prepareTypesExists("notExist").setTypes("type1").execute().actionGet();
+ fail("Exception should have been thrown");
+ } catch (IndexMissingException e) {}
+ try {
+ client.admin().indices().prepareTypesExists("notExist").setTypes("type0").execute().actionGet();
+ fail("Exception should have been thrown");
+ } catch (IndexMissingException e) {}
+ response = client.admin().indices().prepareTypesExists("alias1").setTypes("type1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ response = client.admin().indices().prepareTypesExists("*").setTypes("type1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ response = client.admin().indices().prepareTypesExists("test1", "test2").setTypes("type1").execute().actionGet();
+ assertThat(response.isExists(), equalTo(true));
+ response = client.admin().indices().prepareTypesExists("test1", "test2").setTypes("type2").execute().actionGet();
+ assertThat(response.isExists(), equalTo(false));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/fielddata/breaker/CircuitBreakerServiceTests.java b/src/test/java/org/elasticsearch/indices/fielddata/breaker/CircuitBreakerServiceTests.java
new file mode 100644
index 0000000..2be096a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/fielddata/breaker/CircuitBreakerServiceTests.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.fielddata.breaker;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures;
+
+/**
+ * Integration tests for InternalCircuitBreakerService
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest {
+
+ private String randomRidiculouslySmallLimit() {
+ // two different ways to say 100 bytes (a percentage variant is commented out below)
+ return randomFrom(Arrays.asList("100b", "100"));
+ //, (10000. / JvmInfo.jvmInfo().getMem().getHeapMax().bytes()) + "%")); // this is prone to rounding errors and will fail if JVM memory changes!
+ }
+
+ @Test
+ @TestLogging("org.elasticsearch.indices.fielddata.breaker:TRACE,org.elasticsearch.index.fielddata:TRACE,org.elasticsearch.common.breaker:TRACE")
+ public void testMemoryBreaker() {
+ assertAcked(prepareCreate("cb-test", 1));
+ final Client client = client();
+
+ try {
+
+ // index some different terms so we have some field data for loading
+ int docCount = atLeast(300);
+ for (long id = 0; id < docCount; id++) {
+ client.prepareIndex("cb-test", "type", Long.toString(id))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + id).map()).execute().actionGet();
+ }
+
+ // refresh
+ refresh();
+
+ // execute a search that loads field data (sorting on the "test" field)
+ client.prepareSearch("cb-test").setSource("{\"sort\": \"test\",\"query\":{\"match_all\":{}}}")
+ .execute().actionGet();
+
+ // clear field data cache (thus setting the loaded field data back to 0)
+ client.admin().indices().prepareClearCache("cb-test").setFieldDataCache(true).execute().actionGet();
+
+ // Update circuit breaker settings
+ Settings settings = settingsBuilder()
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING, randomRidiculouslySmallLimit())
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05)
+ .build();
+ client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet();
+
+ // execute a search that loads field data (sorting on the "test" field)
+ // again, this time it should trip the breaker
+ try {
+ SearchResponse resp = client.prepareSearch("cb-test").setSource("{\"sort\": \"test\",\"query\":{\"match_all\":{}}}")
+ .execute().actionGet();
+ assertFailures(resp);
+ } catch (SearchPhaseExecutionException e) {
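+ // the tripped breaker may also surface as an exception instead of item-level failures; both are acceptable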
+ }
+
+ } finally {
+ // Reset settings
+ Settings resetSettings = settingsBuilder()
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING, "-1")
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING, InternalCircuitBreakerService.DEFAULT_OVERHEAD_CONSTANT)
+ .build();
+ client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings).execute().actionGet();
+ }
+ }
+
+ @Test
+ @TestLogging("org.elasticsearch.indices.fielddata.breaker:TRACE,org.elasticsearch.index.fielddata:TRACE,org.elasticsearch.common.breaker:TRACE")
+ public void testRamAccountingTermsEnum() {
+ final Client client = client();
+
+ try {
+
+ // Create an index where the mappings have a field data filter
+ client.admin().indices().prepareCreate("ramtest").setSource("{\"mappings\": {\"type\": {\"properties\": {\"test\": " +
+ "{\"type\": \"string\",\"fielddata\": {\"filter\": {\"regex\": {\"pattern\": \"^value.*\"}}}}}}}}").execute().actionGet();
+
+ // Wait 10 seconds for green
+ client.admin().cluster().prepareHealth("ramtest").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ // index some different terms so we have some field data for loading
+ int docCount = atLeast(300);
+ for (long id = 0; id < docCount; id++) {
+ client.prepareIndex("ramtest", "type", Long.toString(id))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + id).map()).execute().actionGet();
+ }
+
+ // refresh
+ refresh();
+
+ // execute a search that loads field data (sorting on the "test" field)
+ client.prepareSearch("ramtest").setSource("{\"sort\": \"test\",\"query\":{\"match_all\":{}}}")
+ .execute().actionGet();
+
+ // clear field data cache (thus setting the loaded field data back to 0)
+ client.admin().indices().prepareClearCache("ramtest").setFieldDataCache(true).execute().actionGet();
+
+ // Update circuit breaker settings
+ Settings settings = settingsBuilder()
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING, randomRidiculouslySmallLimit())
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05)
+ .build();
+ client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet();
+
+ // execute a search that loads field data (sorting on the "test" field)
+ // again, this time it should trip the breaker
+ try {
+ SearchResponse resp = client.prepareSearch("ramtest").setSource("{\"sort\": \"test\",\"query\":{\"match_all\":{}}}")
+ .execute().actionGet();
+ assertFailures(resp);
+ } catch (SearchPhaseExecutionException e) {
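+ // the tripped breaker may also surface as an exception instead of item-level failures; both are acceptable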
+ }
+
+ } finally {
+ // Reset settings
+ Settings resetSettings = settingsBuilder()
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING, "-1")
+ .put(InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING, InternalCircuitBreakerService.DEFAULT_OVERHEAD_CONSTANT)
+ .build();
+ client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings).execute().actionGet();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/fielddata/breaker/DummyCircuitBreakerService.java b/src/test/java/org/elasticsearch/indices/fielddata/breaker/DummyCircuitBreakerService.java
new file mode 100644
index 0000000..1b3271c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/fielddata/breaker/DummyCircuitBreakerService.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.fielddata.breaker;
+
+import org.elasticsearch.common.breaker.MemoryCircuitBreaker;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.unit.ByteSizeValue;
+
+/**
+ * Class that returns a breaker that never breaks
+ */
+public class DummyCircuitBreakerService implements CircuitBreakerService {
+
+ private final ESLogger logger = Loggers.getLogger(DummyCircuitBreakerService.class);
+
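+ // With a Long.MAX_VALUE limit and a 0.0 overhead constant this breaker can never trip.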
+ private final MemoryCircuitBreaker breaker = new MemoryCircuitBreaker(new ByteSizeValue(Long.MAX_VALUE), 0.0, logger);
+
+ public DummyCircuitBreakerService() {}
+
+ @Override
+ public MemoryCircuitBreaker getBreaker() {
+ return breaker;
+ }
+
+ @Override
+ public FieldDataBreakerStats stats() {
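+ // placeholder stats only; the dummy breaker tracks nothing meaningful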
+ return new FieldDataBreakerStats(-1, -1, 0);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/fielddata/breaker/RandomExceptionCircuitBreakerTests.java b/src/test/java/org/elasticsearch/indices/fielddata/breaker/RandomExceptionCircuitBreakerTests.java
new file mode 100644
index 0000000..7be3274
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/fielddata/breaker/RandomExceptionCircuitBreakerTests.java
@@ -0,0 +1,259 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.fielddata.breaker;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.DirectoryReader;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.TestCluster;
+import org.elasticsearch.test.engine.MockInternalEngine;
+import org.elasticsearch.test.engine.ThrowingAtomicReaderWrapper;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the circuit breaker while random exceptions are happening
+ */
+public class RandomExceptionCircuitBreakerTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBreakerWithRandomExceptions() throws IOException, InterruptedException, ExecutionException {
+ final int numShards = between(1, 5);
+ final int numReplicas = randomIntBetween(0, 1);
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("test-str")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fielddata")
+ .field("format", randomBytesFieldDataFormat())
+ .endObject() // fielddata
+ .endObject() // test-str
+ .startObject("test-num")
+ // I don't use randomNumericType() here because I don't want "byte", and I want "float" and "double"
+ .field("type", randomFrom(Arrays.asList("float", "long", "double", "short", "integer")))
+ .startObject("fielddata")
+ .field("format", randomNumericFieldDataFormat())
+ .endObject() // fielddata
+ .endObject() // test-num
+ .endObject() // properties
+ .endObject() // type
+ .endObject() // {}
+ .string();
+ final double topLevelRate;
+ final double lowLevelRate;
+ if (frequently()) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ lowLevelRate = 1.0/between(2, 10);
+ topLevelRate = 0.0d;
+ } else {
+ topLevelRate = 1.0/between(2, 10);
+ lowLevelRate = 0.0d;
+ }
+ } else {
+ lowLevelRate = 1.0/between(2, 10);
+ topLevelRate = 1.0/between(2, 10);
+ }
+ } else {
+ // rarely no exception
+ topLevelRate = 0d;
+ lowLevelRate = 0d;
+ }
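+ // Net effect: usually one or both exception rates land between 1/10 and 1/2; rarely, neither is set.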
+
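+ // Wrap every directory reader (WRAP_READER_RATIO 1.0) so the exception rates above apply to all shards.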
+ ImmutableSettings.Builder settings = settingsBuilder()
+ .put("index.number_of_shards", numShards)
+ .put("index.number_of_replicas", numReplicas)
+ .put(MockInternalEngine.READER_WRAPPER_TYPE, RandomExceptionDirectoryReaderWrapper.class.getName())
+ .put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate)
+ .put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate)
+ .put(MockInternalEngine.WRAP_READER_RATIO, 1.0d);
+ logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type", mapping).execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster()
+ .health(Requests.clusterHealthRequest().waitForYellowStatus().timeout(TimeValue.timeValueSeconds(5))).get(); // it's OK to timeout here
+ final int numDocs;
+ if (clusterHealthResponse.isTimedOut()) {
+ /* some seeds just won't let you create the index at all and we enter a ping-pong mode,
+ * trying one node after another etc. That is OK, but we need to make sure we don't wait
+ * forever when indexing documents, so we set numDocs = 1 and expect all shards to fail
+ * when we search below. */
+ logger.info("ClusterHealth timed out - only index one doc and expect searches to fail");
+ numDocs = 1;
+ } else {
+ numDocs = between(10, 100);
+ }
+ for (int i = 0; i < numDocs ; i++) {
+ try {
+ client().prepareIndex("test", "type", "" + i)
+ .setTimeout(TimeValue.timeValueSeconds(1)).setSource("test-str", randomUnicodeOfLengthBetween(5, 25), "test-num", i).get();
+ } catch (ElasticsearchException ex) {
+ // ignored: randomly injected exceptions may fail individual index requests
+ }
+ }
+ logger.info("Start Refresh");
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get(); // don't assert on failures here
+ final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
+ logger.info("Refresh failed: [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ",
+ refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length,
+ refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
+ final int numSearches = atLeast(50);
+ NodesStatsResponse resp = client().admin().cluster().prepareNodesStats()
+ .clear().setBreaker(true).execute().actionGet();
+ for (NodeStats stats : resp.getNodes()) {
+ assertThat("Breaker is set to 0", stats.getBreaker().getEstimated(), equalTo(0L));
+ }
+
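+ // Run randomized sorted searches; each one either loads field data successfully or trips an injected exception.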
+ for (int i = 0; i < numSearches; i++) {
+ SearchRequestBuilder searchRequestBuilder = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery());
+ switch(randomIntBetween(0, 5)) {
+ case 5:
+ case 4:
+ case 3:
+ searchRequestBuilder.addSort("test-str", SortOrder.ASC);
+ // fall through - sometimes get both fields
+ case 2:
+ case 1:
+ default:
+ searchRequestBuilder.addSort("test-num", SortOrder.ASC);
+
+ }
+ boolean success = false;
+ try {
+ // Sort by the string and numeric fields, to load them into field data
+ searchRequestBuilder.get();
+ success = true;
+ } catch (SearchPhaseExecutionException ex) {
+ logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
+ }
+
+ if (frequently()) {
+ // Now, clear the cache and check that the circuit breaker has been
+ // successfully set back to zero. If there is a bug in the circuit
+ // breaker adjustment code, it should show up here by the breaker
+ // estimate being either positive or negative.
+ client().admin().indices().prepareClearCache("test").setFieldDataCache(true).execute().actionGet();
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats()
+ .clear().setBreaker(true).execute().actionGet();
+ for (NodeStats stats : nodeStats.getNodes()) {
+ assertThat("Breaker reset to 0 last search success: " + success + " mapping: " + mapping, stats.getBreaker().getEstimated(), equalTo(0L));
+ }
+ }
+ }
+ }
+
+ public static final String EXCEPTION_TOP_LEVEL_RATIO_KEY = "index.engine.exception.ratio.top";
+ public static final String EXCEPTION_LOW_LEVEL_RATIO_KEY = "index.engine.exception.ratio.low";
+
+ // TODO: Generalize this class and add it as a utility
+ public static class RandomExceptionDirectoryReaderWrapper extends MockInternalEngine.DirectoryReaderWrapper {
+ private final Settings settings;
+ static class ThrowingSubReaderWrapper extends SubReaderWrapper implements ThrowingAtomicReaderWrapper.Thrower {
+ private final Random random;
+ private final double topLevelRatio;
+ private final double lowLevelRatio;
+
+ ThrowingSubReaderWrapper(Settings settings) {
+ final long seed = settings.getAsLong(TestCluster.SETTING_INDEX_SEED, 0L);
+ this.topLevelRatio = settings.getAsDouble(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d);
+ this.lowLevelRatio = settings.getAsDouble(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d);
+ this.random = new Random(seed);
+ }
+
+ @Override
+ public AtomicReader wrap(AtomicReader reader) {
+ return new ThrowingAtomicReaderWrapper(reader, this);
+ }
+
+ @Override
+ public void maybeThrow(ThrowingAtomicReaderWrapper.Flags flag) throws IOException {
+ switch (flag) {
+ case Fields:
+ break;
+ case TermVectors:
+ break;
+ case Terms:
+ case TermsEnum:
+ if (random.nextDouble() < topLevelRatio) {
+ throw new IOException("Forced top level Exception on [" + flag.name() + "]");
+ }
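+ // falls through to Intersect, which is a no-op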
+ case Intersect:
+ break;
+ case Norms:
+ break;
+ case NumericDocValues:
+ break;
+ case BinaryDocValues:
+ break;
+ case SortedDocValues:
+ break;
+ case SortedSetDocValues:
+ break;
+ case DocsEnum:
+ case DocsAndPositionsEnum:
+ if (random.nextDouble() < lowLevelRatio) {
+ throw new IOException("Forced low level Exception on [" + flag.name() + "]");
+ }
+ break;
+ }
+ }
+
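+ // only wrap terms for this test's fields (everything starting with "test")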
+ public boolean wrapTerms(String field) {
+ return field.startsWith("test");
+ }
+ }
+
+ public RandomExceptionDirectoryReaderWrapper(DirectoryReader in, Settings settings) {
+ super(in, new ThrowingSubReaderWrapper(settings));
+ this.settings = settings;
+ }
+
+ @Override
+ protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) {
+ return new RandomExceptionDirectoryReaderWrapper(in, settings);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java b/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java
new file mode 100644
index 0000000..e7a0628
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.leaks;
+
+import org.apache.lucene.util.LuceneTestCase.BadApple;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.shard.service.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=1)
+public class IndicesLeaksTests extends ElasticsearchIntegrationTest {
+
+
+ @SuppressWarnings({"ConstantConditions", "unchecked"})
+ @Test
+ @BadApple
+ public void testIndexShardLifecycleLeak() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+
+ IndicesService indicesService = cluster().getInstance(IndicesService.class);
+ IndexService indexService = indicesService.indexServiceSafe("test");
+ Injector indexInjector = indexService.injector();
+ IndexShard shard = indexService.shardSafe(0);
+ Injector shardInjector = indexService.shardInjector(0);
+
+ performCommonOperations();
+
+ List<WeakReference> indexReferences = new ArrayList<WeakReference>();
+ List<WeakReference> shardReferences = new ArrayList<WeakReference>();
+
+ // TODO if we could iterate over the already created classes on the injector, we can just add them here to the list
+ // for now, we simply add some classes that make sense
+
+ // add index references
+ indexReferences.add(new WeakReference(indexService));
+ indexReferences.add(new WeakReference(indexInjector));
+ indexReferences.add(new WeakReference(indexService.mapperService()));
+ for (DocumentMapper documentMapper : indexService.mapperService()) {
+ indexReferences.add(new WeakReference(documentMapper));
+ }
+ indexReferences.add(new WeakReference(indexService.aliasesService()));
+ indexReferences.add(new WeakReference(indexService.analysisService()));
+ indexReferences.add(new WeakReference(indexService.fieldData()));
+ indexReferences.add(new WeakReference(indexService.queryParserService()));
+
+
+ // add shard references
+ shardReferences.add(new WeakReference(shard));
+ shardReferences.add(new WeakReference(shardInjector));
+
+ indexService = null;
+ indexInjector = null;
+ shard = null;
+ shardInjector = null;
+
+ client().admin().indices().prepareDelete().execute().actionGet();
+
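+ // Trigger GC repeatedly and poll the weak references; once the index is deleted they should all clear.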
+ for (int i = 0; i < 100; i++) {
+ System.gc();
+ int indexNotCleared = 0;
+ for (WeakReference indexReference : indexReferences) {
+ if (indexReference.get() != null) {
+ indexNotCleared++;
+ }
+ }
+ int shardNotCleared = 0;
+ for (WeakReference shardReference : shardReferences) {
+ if (shardReference.get() != null) {
+ shardNotCleared++;
+ }
+ }
+ logger.info("round {}, indices {}/{}, shards {}/{}", i, indexNotCleared, indexReferences.size(), shardNotCleared, shardReferences.size());
+ if (indexNotCleared == 0 && shardNotCleared == 0) {
+ break;
+ }
+ }
+
+ for (WeakReference indexReference : indexReferences) {
+ assertThat("dangling index reference: " + indexReference.get(), indexReference.get(), nullValue());
+ }
+
+ for (WeakReference shardReference : shardReferences) {
+ assertThat("dangling shard reference: " + shardReference.get(), shardReference.get(), nullValue());
+ }
+ }
+
+ private void performCommonOperations() {
+ client().prepareIndex("test", "type", "1").setSource("field1", "value", "field2", 2, "field3", 3.0f).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ client().prepareSearch("test").setQuery(QueryBuilders.queryString("field1:value")).execute().actionGet();
+ client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field1", "value")).execute().actionGet();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java b/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java
new file mode 100644
index 0000000..fccccbc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.emptyIterable;
+
+public class ConcurrentDynamicTemplateTests extends ElasticsearchIntegrationTest {
+
+ private final String mappingType = "test-mapping";
+
+ @Test // see #3544
+ public void testConcurrentDynamicMapping() throws Exception {
+ final String fieldName = "field";
+ final String mapping = "{ \"" + mappingType + "\": {" +
+ "\"dynamic_templates\": ["
+ + "{ \"" + fieldName + "\": {" + "\"path_match\": \"*\"," + "\"mapping\": {" + "\"type\": \"string\"," + "\"store\": \"yes\","
+ + "\"index\": \"analyzed\", \"analyzer\": \"whitespace\" } } } ] } }";
+ // every request indexes the same field name, so the concurrent dynamic
+ // mapping updates all race on the same template
+
+ int iters = atLeast(5);
+ for (int i = 0; i < iters; i++) {
+ cluster().wipeIndices("test");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("number_of_shards", between(1, 5))
+ .put("number_of_replicas", between(0, 1)).build())
+ .addMapping(mappingType, mapping).execute().actionGet();
+ ensureYellow();
+ int numDocs = atLeast(10);
+ final CountDownLatch latch = new CountDownLatch(numDocs);
+ final List<Throwable> throwable = new CopyOnWriteArrayList<Throwable>();
+ int currentID = 0;
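+ // index asynchronously; the latch releases once every response or failure has arrived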
+ for (int j = 0; j < numDocs; j++) {
+ Map<String, Object> source = new HashMap<String, Object>();
+ source.put(fieldName, "test-user");
+ client().prepareIndex("test", mappingType, Integer.toString(currentID++)).setSource(source).execute(new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse response) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ throwable.add(e);
+ latch.countDown();
+ }
+ });
+ }
+ latch.await();
+ assertThat(throwable, emptyIterable());
+ refresh();
+ assertHitCount(client().prepareSearch("test").setQuery(QueryBuilders.matchQuery(fieldName, "test-user")).get(), numDocs);
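+ // the whitespace analyzer keeps "test-user" as a single token, so the two-token query "test user" matches nothing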
+ assertHitCount(client().prepareSearch("test").setQuery(QueryBuilders.matchQuery(fieldName, "test user")).get(), 0);
+
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/mapping/DedicatedMasterGetFieldMappingTests.java b/src/test/java/org/elasticsearch/indices/mapping/DedicatedMasterGetFieldMappingTests.java
new file mode 100644
index 0000000..bdac39d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/mapping/DedicatedMasterGetFieldMappingTests.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+/**
+ */
+@LuceneTestCase.Slow
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numNodes = 0)
+public class DedicatedMasterGetFieldMappingTests extends SimpleGetFieldMappingsTests {
+
+ @Before
+ public void before1() {
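+ // start a dedicated master node that holds no data, then a regular node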
+ Settings settings = settingsBuilder()
+ .put("node.data", false)
+ .build();
+ cluster().startNode(settings);
+ cluster().startNode(ImmutableSettings.EMPTY);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/mapping/SimpleDeleteMappingTests.java b/src/test/java/org/elasticsearch/indices/mapping/SimpleDeleteMappingTests.java
new file mode 100644
index 0000000..3499b88
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/mapping/SimpleDeleteMappingTests.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleDeleteMappingTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleDeleteMapping() throws Exception {
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("value", "test" + i)
+ .endObject()).execute().actionGet();
+ }
+
+ ensureGreen();
+ refresh();
+
+ for (int i = 0; i < 10; i++) {
+ CountResponse countResponse = client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
+ assertThat(countResponse.getCount(), equalTo(10L));
+ }
+
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ assertThat(clusterState.metaData().index("test").mappings().containsKey("type1"), equalTo(true));
+
+ GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings("test").setTypes("type1").execute().actionGet();
+ assertThat(mappingsResponse.getMappings().get("test").get("type1"), notNullValue());
+
+ ElasticsearchAssertions.assertAcked(client().admin().indices().prepareDeleteMapping().setIndices("test").setType("type1"));
+
+ for (int i = 0; i < 10; i++) {
+ CountResponse countResponse = client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
+ assertThat(countResponse.getCount(), equalTo(0L));
+ }
+
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(clusterState.metaData().index("test").mappings().containsKey("type1"), equalTo(false));
+ mappingsResponse = client().admin().indices().prepareGetMappings("test").setTypes("type1").execute().actionGet();
+ assertThat(mappingsResponse.getMappings().get("test"), nullValue());
+ }
+
+
+ @Test
+ public void deleteMappingAllowNoBlankIndexAndNoEmptyStrings() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("index1").addMapping("1", "field1", "type=string").get());
+ assertAcked(client().admin().indices().prepareCreate("1index").addMapping("1", "field1", "type=string").get());
+
+ // Should succeed, since no wildcards
+ client().admin().indices().prepareDeleteMapping("1index").setType("1").get();
+ try {
+ client().admin().indices().prepareDeleteMapping("_all").get();
+ fail();
+ } catch (ActionRequestValidationException e) {}
+
+ try {
+ client().admin().indices().prepareDeleteMapping("_all").setType("").get();
+ fail();
+ } catch (ActionRequestValidationException e) {}
+
+ try {
+ client().admin().indices().prepareDeleteMapping().setType("1").get();
+ fail();
+ } catch (ActionRequestValidationException e) {}
+
+ try {
+ client().admin().indices().prepareDeleteMapping("").setType("1").get();
+ fail();
+ } catch (ActionRequestValidationException e) {}
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java
new file mode 100644
index 0000000..1f65aa7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleGetFieldMappingsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void getMappingsWhereThereAreNone() {
+ createIndex("index");
+ ensureYellow();
+ GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings().get();
+ assertThat(response.mappings().size(), equalTo(1));
+ assertThat(response.mappings().get("index").size(), equalTo(0));
+
+ assertThat(response.fieldMappings("index", "type", "field"), Matchers.nullValue());
+ }
+
+ private XContentBuilder getMappingForType(String type) throws IOException {
+ return jsonBuilder().startObject().startObject(type).startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .startObject("obj").startObject("properties").startObject("subfield").field("type", "string").field("index", "not_analyzed").endObject().endObject().endObject()
+ .endObject().endObject().endObject();
+ }
+
+ @Test
+ public void simpleGetFieldMappings() throws Exception {
+
+ Settings.Builder settings = ImmutableSettings.settingsBuilder()
+ .put("number_of_shards", randomIntBetween(1, 3), "number_of_replicas", randomIntBetween(0, 1));
+
+ assertTrue(client().admin().indices().prepareCreate("indexa")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB"))
+ .setSettings(settings)
+ .get().isAcknowledged());
+ assertTrue(client().admin().indices().prepareCreate("indexb")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB"))
+ .setSettings(settings)
+ .get().isAcknowledged());
+
+ ensureYellow();
+
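+ // fieldMappings(index, type, field) accepts either the full path or the leaf field name; both forms are exercised below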
+ // Get mappings by full name
+ GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("indexa").setTypes("typeA").setFields("field1", "obj.subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "field1").fullName(), equalTo("field1"));
+ assertThat(response.fieldMappings("indexa", "typeA", "field1").sourceAsMap(), hasKey("field1"));
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.mappings().get("indexa"), not(hasKey("typeB")));
+ assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
+ assertThat(response.mappings(), not(hasKey("indexb")));
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+
+ // Get mappings by name
+ response = client().admin().indices().prepareGetFieldMappings("indexa").setTypes("typeA").setFields("field1", "subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "field1").fullName(), equalTo("field1"));
+ assertThat(response.fieldMappings("indexa", "typeA", "field1").sourceAsMap(), hasKey("field1"));
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+
+ // get mappings by name across multiple indices
+ response = client().admin().indices().prepareGetFieldMappings().setTypes("typeA").setFields("subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "subfield"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexb", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexb", "typeB", "subfield"), nullValue());
+
+ // get mappings by name across multiple types
+ response = client().admin().indices().prepareGetFieldMappings("indexa").setFields("subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexa", "typeB", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeA", "subfield"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeA", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeB", "subfield"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+
+ // get mappings by name across multiple types & indices
+ response = client().admin().indices().prepareGetFieldMappings().setFields("subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexa", "typeB", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexb", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeB", "subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexb", "typeB", "subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void simpleGetFieldMappingsWithDefaults() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type", getMappingForType("type")).get();
+
+ client().prepareIndex("test", "type", "1").setSource("num", 1).get();
+ ensureYellow();
+
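+ // includeDefaults(true) folds implicit defaults (e.g. "index" and "type") into the returned mapping source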
+ GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings().setFields("num", "field1", "subfield").includeDefaults(true).get();
+
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "num").sourceAsMap().get("num"), hasEntry("index", (Object) "not_analyzed"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "num").sourceAsMap().get("num"), hasEntry("type", (Object) "long"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "field1").sourceAsMap().get("field1"), hasEntry("index", (Object) "analyzed"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "field1").sourceAsMap().get("field1"), hasEntry("type", (Object) "string"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "subfield").sourceAsMap().get("subfield"), hasEntry("index", (Object) "not_analyzed"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "subfield").sourceAsMap().get("subfield"), hasEntry("type", (Object) "string"));
+
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java
new file mode 100644
index 0000000..2599833
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleGetMappingsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void getMappingsWhereThereAreNone() {
+ createIndex("index");
+ GetMappingsResponse response = client().admin().indices().prepareGetMappings().execute().actionGet();
+ assertThat(response.mappings().containsKey("index"), equalTo(true));
+ assertThat(response.mappings().get("index").size(), equalTo(0));
+ }
+
+
+ private XContentBuilder getMappingForType(String type) throws IOException {
+ return jsonBuilder().startObject().startObject(type).startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject().endObject().endObject();
+ }
+
+
+ @Test
+ public void simpleGetMappings() throws Exception {
+ client().admin().indices().prepareCreate("indexa")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB"))
+ .addMapping("Atype", getMappingForType("Atype"))
+ .addMapping("Btype", getMappingForType("Btype"))
+ .execute().actionGet();
+ client().admin().indices().prepareCreate("indexb")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB"))
+ .addMapping("Atype", getMappingForType("Atype"))
+ .addMapping("Btype", getMappingForType("Btype"))
+ .execute().actionGet();
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+
+ // Get all mappings
+ GetMappingsResponse response = client().admin().indices().prepareGetMappings().execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(4));
+ assertThat(response.mappings().get("indexa").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(4));
+ assertThat(response.mappings().get("indexb").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Btype"), notNullValue());
+
+ // Get all mappings, via wildcard support
+ response = client().admin().indices().prepareGetMappings("*").setTypes("*").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(4));
+ assertThat(response.mappings().get("indexa").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(4));
+ assertThat(response.mappings().get("indexb").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Btype"), notNullValue());
+
+ // Get all typeA mappings in all indices
+ response = client().admin().indices().prepareGetMappings("*").setTypes("typeA").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(1));
+ assertThat(response.mappings().get("indexb").get("typeA"), notNullValue());
+
+ // Get all mappings in indexa
+ response = client().admin().indices().prepareGetMappings("indexa").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").size(), equalTo(4));
+ assertThat(response.mappings().get("indexa").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+
+ // Get all mappings beginning with A* in indexa
+ response = client().admin().indices().prepareGetMappings("indexa").setTypes("A*").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+
+ // Get all mappings beginning with B* in all indices
+ response = client().admin().indices().prepareGetMappings().setTypes("B*").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(1));
+ assertThat(response.mappings().get("indexb").get("Btype"), notNullValue());
+
+ // Get all mappings beginning with B* and A* in all indices
+ response = client().admin().indices().prepareGetMappings().setTypes("B*", "A*").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(2));
+ assertThat(response.mappings().get("indexb").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Btype"), notNullValue());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java b/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java
new file mode 100644
index 0000000..f2de136
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java
@@ -0,0 +1,477 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MergeMappingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.*;
+
+public class UpdateMappingTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void dynamicUpdates() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ int recCount = randomIntBetween(200, 600);
+ int numberOfTypes = randomIntBetween(1, 5);
+ List<IndexRequestBuilder> indexRequests = Lists.newArrayList();
+ for (int rec = 0; rec < recCount; rec++) {
+ String type = "type" + (rec % numberOfTypes);
+ String fieldName = "field_" + type + "_" + rec;
+ indexRequests.add(client().prepareIndex("test", type, Integer.toString(rec)).setSource(fieldName, "some_value"));
+ }
+ indexRandom(true, indexRequests);
+
+ logger.info("checking all the documents are there");
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(refreshResponse.getFailedShards(), equalTo(0));
+ CountResponse response = client().prepareCount("test").execute().actionGet();
+ assertThat(response.getCount(), equalTo((long) recCount));
+
+ logger.info("checking all the fields are in the mappings");
+
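+ // If the cluster state version changes while we verify, start the whole check over via the reRunTest label.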
+ reRunTest:
+ while (true) {
+ Map<String, String> typeToSource = Maps.newHashMap();
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ for (ObjectObjectCursor<String, MappingMetaData> cursor : state.getMetaData().getIndices().get("test").getMappings()) {
+ typeToSource.put(cursor.key, cursor.value.source().string());
+ }
+ for (int rec = 0; rec < recCount; rec++) {
+ String type = "type" + (rec % numberOfTypes);
+ String fieldName = "field_" + type + "_" + rec;
+ fieldName = "\"" + fieldName + "\""; // quote it, so we make sure we catch the exact one
+ if (!typeToSource.containsKey(type) || !typeToSource.get(type).contains(fieldName)) {
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute().actionGet();
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ PendingClusterTasksResponse pendingTasks = client().admin().cluster().preparePendingClusterTasks().get();
+ return pendingTasks.pendingTasks().isEmpty();
+ }
+ });
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute().actionGet();
+ // it's going to fail; before it does, make sure the cluster state hasn't changed on us...
+ ClusterState state2 = client().admin().cluster().prepareState().get().getState();
+ if (state.version() != state2.version()) {
+ logger.info("not the same version, used for test {}, new one {}, re-running test, first wait for mapping to wait", state.version(), state2.version());
+ continue reRunTest;
+ }
+ logger.info("failing, type {}, field {}, mapping {}", type, fieldName, typeToSource.get(type));
+ assertThat(typeToSource.get(type), containsString(fieldName));
+ }
+ }
+ break;
+ }
+ }
+
+ @Test
+ public void updateMappingWithoutType() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).addMapping("doc", "{\"doc\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("doc")
+ .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}")
+ .execute().actionGet();
+
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").execute().actionGet();
+ assertThat(getMappingsResponse.mappings().get("test").get("doc").source().toString(),
+ equalTo("{\"doc\":{\"properties\":{\"body\":{\"type\":\"string\"},\"date\":{\"type\":\"integer\"}}}}"));
+ }
+
+ @Test
+ public void updateMappingWithoutTypeMultiObjects() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("doc")
+ .setSource("{\"_source\":{\"enabled\":false},\"properties\":{\"date\":{\"type\":\"integer\"}}}")
+ .execute().actionGet();
+
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").execute().actionGet();
+ assertThat(getMappingsResponse.mappings().get("test").get("doc").source().toString(),
+ equalTo("{\"doc\":{\"_source\":{\"enabled\":false},\"properties\":{\"date\":{\"type\":\"integer\"}}}}"));
+ }
+
+ @Test(expected = MergeMappingException.class)
+ public void updateMappingWithConflicts() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ ).addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type")
+ .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"integer\"}}}}")
+ .execute().actionGet();
+
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+ }
+
+ @Test(expected = MergeMappingException.class)
+ public void updateMappingWithNormsConflicts() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\", \"norms\": { \"enabled\": false }}}}}")
+ .execute().actionGet();
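+ // turning norms back on is an illegal merge and must throw MergeMappingException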
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type")
+ .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"string\", \"norms\": { \"enabled\": true }}}}}")
+ .execute().actionGet();
+ }
+
+ /*
+ First regression test for https://github.com/elasticsearch/elasticsearch/issues/3381
+ */
+ @Test
+ public void updateMappingWithIgnoredConflicts() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ ).addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type")
+ .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"integer\"}}}}")
+ .setIgnoreConflicts(true)
+ .execute().actionGet();
+
+ // the only change conflicted and was ignored, so the request is still acknowledged without a mapping update
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+ }
+
+ /*
+ Second regression test for https://github.com/elasticsearch/elasticsearch/issues/3381
+ */
+ @Test
+ public void updateMappingNoChanges() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ ).addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type")
+ .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+
+ // identical mapping: nothing to merge, but the request is still acknowledged
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+ }
+
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void updateIncludeExclude() throws Exception {
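+ // _source includes/excludes control which fields are kept in the stored _source at
+ // index time; documents indexed earlier keep the source they were stored with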
+ assertAcked(prepareCreate("test").addMapping("type",
+ jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("normal").field("type", "long").endObject()
+ .startObject("exclude").field("type", "long").endObject()
+ .startObject("include").field("type", "long").endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen(); // make sure that replicas are initialized so the refresh command will work on them too
+
+ logger.info("Index doc");
+ index("test", "type", "1", JsonXContent.contentBuilder().startObject()
+ .field("normal", 1).field("exclude", 1).field("include", 1)
+ .endObject()
+ );
+ refresh(); // commit it for later testing.
+
+
+ logger.info("Adding exclude settings");
+ PutMappingResponse putResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource(
+ JsonXContent.contentBuilder().startObject().startObject("type")
+ .startObject("_source")
+ .startArray("excludes").value("exclude").endArray()
+ .endObject().endObject()
+ ).get();
+
+ assertTrue(putResponse.isAcknowledged());
+
+ // changed mapping doesn't affect indexed documents (checking backward compatibility)
+ GetResponse getResponse = client().prepareGet("test", "type", "1").setRealtime(false).get();
+ assertThat(getResponse.getSource(), hasKey("normal"));
+ assertThat(getResponse.getSource(), hasKey("exclude"));
+ assertThat(getResponse.getSource(), hasKey("include"));
+
+
+ logger.info("Index doc again");
+ index("test", "type", "1", JsonXContent.contentBuilder().startObject()
+ .field("normal", 2).field("exclude", 1).field("include", 2)
+ .endObject()
+ );
+
+ // ...but the changes do affect newly indexed docs
+ getResponse = get("test", "type", "1");
+ assertThat(getResponse.getSource(), hasKey("normal"));
+ assertThat(getResponse.getSource(), not(hasKey("exclude")));
+ assertThat(getResponse.getSource(), hasKey("include"));
+
+
+ logger.info("Changing mapping to includes");
+ putResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource(
+ JsonXContent.contentBuilder().startObject().startObject("type")
+ .startObject("_source")
+ .startArray("excludes").endArray()
+ .startArray("includes").value("include").endArray()
+ .endObject().endObject()
+ ).get();
+ assertTrue(putResponse.isAcknowledged());
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get();
+ MappingMetaData typeMapping = getMappingsResponse.getMappings().get("test").get("type");
+ assertThat((Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("includes"));
+ ArrayList<String> includes = (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("includes");
+ assertThat(includes, contains("include"));
+ assertThat((Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes"));
+ assertThat((ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("excludes"), emptyIterable());
+
+
+ logger.info("Indexing doc yet again");
+ index("test", "type", "1", JsonXContent.contentBuilder().startObject()
+ .field("normal", 3).field("exclude", 3).field("include", 3)
+ .endObject()
+ );
+
+ getResponse = get("test", "type", "1");
+ assertThat(getResponse.getSource(), not(hasKey("normal")));
+ assertThat(getResponse.getSource(), not(hasKey("exclude")));
+ assertThat(getResponse.getSource(), hasKey("include"));
+
+
+ logger.info("Adding excludes, but keep includes");
+ putResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource(
+ JsonXContent.contentBuilder().startObject().startObject("type")
+ .startObject("_source")
+ .startArray("excludes").value("*.excludes").endArray()
+ .endObject().endObject()
+ ).get();
+ assertTrue(putResponse.isAcknowledged());
+
+ getMappingsResponse = client().admin().indices().prepareGetMappings("test").get();
+ typeMapping = getMappingsResponse.getMappings().get("test").get("type");
+ assertThat((Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("includes"));
+ includes = (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("includes");
+ assertThat(includes, contains("include"));
+ assertThat((Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes"));
+ ArrayList<String> excludes = (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("excludes");
+ assertThat(excludes, contains("*.excludes"));
+
+
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void updateDefaultMappingSettings() throws Exception {
+
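+ // _default_ is a mapping template applied when new types are created; unlike concrete
+ // type mappings it can be replaced wholesale, which is what this test exercises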
+ logger.info("Creating index with _default_ mappings");
+ client().admin().indices().prepareCreate("test").addMapping(MapperService.DEFAULT_MAPPING,
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .field("date_detection", false)
+ .endObject().endObject()
+ ).get();
+
+ GetMappingsResponse getResponse = client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
+ Map<String, Object> defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
+ assertThat(defaultMapping, hasKey("date_detection"));
+
+
+ logger.info("Emptying _default_ mappings");
+ // now remove it
+ PutMappingResponse putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .endObject().endObject()
+ ).get();
+ assertThat(putResponse.isAcknowledged(), equalTo(true));
+ logger.info("Done Emptying _default_ mappings");
+
+ getResponse = client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
+ defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
+ assertThat(defaultMapping, not(hasKey("date_detection")));
+
+ // now test that you can change settings that are normally unchangeable
+ logger.info("Creating _default_ mappings with an analyzed field");
+ putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("properties").startObject("f").field("type", "string").field("index", "analyzed").endObject().endObject()
+ .endObject().endObject()
+ ).get();
+ assertThat(putResponse.isAcknowledged(), equalTo(true));
+
+
+ logger.info("Changing _default_ mappings field from analyzed to non-analyzed");
+ putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("properties").startObject("f").field("type", "string").field("index", "not_analyzed").endObject().endObject()
+ .endObject().endObject()
+ ).get();
+ assertThat(putResponse.isAcknowledged(), equalTo(true));
+ logger.info("Done changing _default_ mappings field from analyzed to non-analyzed");
+
+ getResponse = client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
+ defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
+ Map<String, Object> fieldSettings = (Map<String, Object>) ((Map) defaultMapping.get("properties")).get("f");
+ assertThat(fieldSettings, hasEntry("index", (Object) "not_analyzed"));
+
+ // but we still validate the _default_ type
+ logger.info("Confirming _default_ mappings validation");
+ assertThrows(client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("properties").startObject("f").field("type", "DOESNT_EXIST").endObject().endObject()
+ .endObject().endObject()
+ ), MapperParsingException.class);
+
+ }
+
+ @Test
+ public void updateMappingConcurrently() throws Throwable {
+ // Test that we can concurrently update different indices and types.
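+ // Use at least as many shards as there are nodes so that every node should host
+ // shards of both indices and see the mapping updates.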
+ int shardNo = Math.max(5, cluster().size());
+
+ prepareCreate("test1").setSettings("index.number_of_shards", shardNo).execute().actionGet();
+ prepareCreate("test2").setSettings("index.number_of_shards", shardNo).execute().actionGet();
+
+ // This is important: the test assumes all nodes are aware of all indices. Due to
+ // initializing-shard throttling, not all shards are allocated by the initial create
+ // index call, so wait for that here.
+ ensureYellow();
+
+ final Throwable[] threadException = new Throwable[1];
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] threads = new Thread[3];
+ final CyclicBarrier barrier = new CyclicBarrier(threads.length);
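+ // the barrier releases all threads at once so the mapping updates really run concurrently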
+ final ArrayList<Client> clientArray = new ArrayList<Client>();
+ for (Client c : clients()) {
+ clientArray.add(c);
+ }
+
+ for (int j = 0; j < threads.length; j++) {
+ threads[j] = new Thread(new Runnable() {
+ @SuppressWarnings("unchecked")
+ @Override
+ public void run() {
+ try {
+ barrier.await();
+
+ for (int i = 0; i < 100; i++) {
+ if (stop.get()) {
+ return;
+ }
+
+ Client client1 = clientArray.get(i % clientArray.size());
+ Client client2 = clientArray.get((i + 1) % clientArray.size());
+ String indexName = i % 2 == 0 ? "test2" : "test1";
+ String typeName = "type" + (i % 10);
+ String fieldName = Thread.currentThread().getName() + "_" + i;
+
+ PutMappingResponse response = client1.admin().indices().preparePutMapping(indexName).setType(typeName).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(typeName)
+ .startObject("properties").startObject(fieldName).field("type", "string").endObject().endObject()
+ .endObject().endObject()
+ ).get();
+
+ assertThat(response.isAcknowledged(), equalTo(true));
+ GetMappingsResponse getMappingResponse = client2.admin().indices().prepareGetMappings(indexName).get();
+ ImmutableOpenMap<String, MappingMetaData> mappings = getMappingResponse.getMappings().get(indexName);
+ assertThat(mappings.containsKey(typeName), equalTo(true));
+ assertThat(((Map<String, Object>) mappings.get(typeName).getSourceAsMap().get("properties")).keySet(), Matchers.hasItem(fieldName));
+ }
+ } catch (Throwable t) {
+ threadException[0] = t;
+ stop.set(true);
+ }
+ }
+ });
+
+ threads[j].setName("t_" + j);
+ threads[j].start();
+ }
+
+ for (Thread t : threads) t.join();
+
+ if (threadException[0] != null) {
+ throw threadException[0];
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java
new file mode 100644
index 0000000..ff85b38
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.settings;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void simpleUpdateNumberOfReplicasTests() throws Exception {
+ logger.info("Creating index test");
+ prepareCreate("test", 2).execute().actionGet();
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(5));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(10));
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("value", "test" + i)
+ .endObject()).get();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ CountResponse countResponse = client().prepareCount().setQuery(matchAllQuery()).get();
+ assertHitCount(countResponse, 10L);
+ }
+
+ logger.info("Increasing the number of replicas from 1 to 2");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 2)).execute().actionGet();
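+ // give the settings update a moment to be applied to the cluster state before polling health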
+ Thread.sleep(200);
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForActiveShards(10).execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(5));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(10));
+
+ logger.info("starting another node to new replicas will be allocated to it");
+ allowNodes("test", 3);
+ Thread.sleep(100);
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=3").execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(5));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(15));
+
+ for (int i = 0; i < 10; i++) {
+ CountResponse countResponse = client().prepareCount().setQuery(matchAllQuery()).get();
+ assertHitCount(countResponse, 10L);
+ }
+
+ logger.info("Decreasing number of replicas from 2 to 0");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 0)).get();
+ Thread.sleep(200);
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=3").execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(5));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(0));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(5));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 10);
+ }
+ }
+
+ @Test
+ public void testAutoExpandNumberOfReplicas0ToData() {
+ cluster().ensureAtMostNumNodes(2);
+ logger.info("--> creating index test with auto expand replicas");
+ prepareCreate("test", 2, settingsBuilder().put("index.number_of_shards", 2).put("auto_expand_replicas", "0-all")).execute().actionGet();
+
+ logger.info("--> running cluster health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(4).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(4));
+
+ logger.info("--> add another node, should increase the number of replicas");
+ allowNodes("test", 3);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(6).setWaitForNodes(">=3").execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(6));
+
+ logger.info("--> closing one node");
+ cluster().ensureAtMostNumNodes(2);
+ allowNodes("test", 2);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(4).setWaitForNodes(">=2").execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(4));
+
+ logger.info("--> closing another node");
+ cluster().ensureAtMostNumNodes(1);
+ allowNodes("test", 1);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=1").setWaitForActiveShards(2).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(0));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(2));
+ }
+
+ @Test
+ public void testAutoExpandNumberReplicas1ToData() {
+ logger.info("--> creating index test with auto expand replicas");
+ cluster().ensureAtMostNumNodes(2);
+ prepareCreate("test", 2, settingsBuilder().put("index.number_of_shards", 2).put("auto_expand_replicas", "1-all")).execute().actionGet();
+
+ logger.info("--> running cluster health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(4).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(4));
+
+ logger.info("--> add another node, should increase the number of replicas");
+ allowNodes("test", 3);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(6).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(6));
+
+ logger.info("--> closing one node");
+ cluster().ensureAtMostNumNodes(2);
+ allowNodes("test", 2);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=2").setWaitForActiveShards(4).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(4));
+
+ logger.info("--> closing another node");
+ cluster().ensureAtMostNumNodes(1);
+ allowNodes("test", 1);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes(">=1").setWaitForActiveShards(2).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(2));
+ }
+
+ @Test
+ public void testAutoExpandNumberReplicas2() {
+ logger.info("--> creating index test with auto expand replicas set to 0-2");
+ prepareCreate("test", 3, settingsBuilder().put("index.number_of_shards", 2).put("auto_expand_replicas", "0-2")).execute().actionGet();
+
+ logger.info("--> running cluster health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(6).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(6));
+
+ logger.info("--> add two more nodes");
+ allowNodes("test", 4);
+ allowNodes("test", 5);
+
+ logger.info("--> update the auto expand replicas to 0-3");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("auto_expand_replicas", "0-3")).execute().actionGet();
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(8).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(3));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(8));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java
new file mode 100644
index 0000000..177933b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.settings;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class UpdateSettingsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testOpenCloseUpdateSettings() throws Exception {
+ createIndex("test");
+ try {
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.refresh_interval", -1) // this one can change
+ .put("index.cache.filter.type", "none") // this one can't
+ )
+ .execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ // all is well
+ }
+
+ IndexMetaData indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test");
+ assertThat(indexMetaData.settings().get("index.refresh_interval"), nullValue());
+ assertThat(indexMetaData.settings().get("index.cache.filter.type"), nullValue());
+
+ // Now verify via dedicated get settings api:
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getSetting("test", "index.refresh_interval"), nullValue());
+ assertThat(getSettingsResponse.getSetting("test", "index.cache.filter.type"), nullValue());
+
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.refresh_interval", -1) // this one can change
+ )
+ .execute().actionGet();
+
+ indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test");
+ assertThat(indexMetaData.settings().get("index.refresh_interval"), equalTo("-1"));
+ // Now verify via dedicated get settings api:
+ getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getSetting("test", "index.refresh_interval"), equalTo("-1"));
+
+ // now close the index, change the non dynamic setting, and see that it applies
+
+ // Wait for the index to turn green before attempting to close it
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setTimeout("30s").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ client().admin().indices().prepareClose("test").execute().actionGet();
+
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.refresh_interval", "1s") // this one can change
+ .put("index.cache.filter.type", "none") // this one can't
+ )
+ .execute().actionGet();
+
+ indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test");
+ assertThat(indexMetaData.settings().get("index.refresh_interval"), equalTo("1s"));
+ assertThat(indexMetaData.settings().get("index.cache.filter.type"), equalTo("none"));
+
+ // Now verify via dedicated get settings api:
+ getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getSetting("test", "index.refresh_interval"), equalTo("1s"));
+ assertThat(getSettingsResponse.getSetting("test", "index.cache.filter.type"), equalTo("none"));
+ }
+
+ @Test
+ public void testEngineGCDeletesSetting() throws InterruptedException {
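+ // index.gc_deletes controls how long delete tombstones (and their version info) are
+ // retained; once a tombstone is gone, a versioned write can no longer be matched
+ // against the expected version and must conflict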
+ createIndex("test");
+ client().prepareIndex("test", "type", "1").setSource("f", 1).get(); // set version to 1
+ client().prepareDelete("test", "type", "1").get(); // sets version to 2
+ client().prepareIndex("test", "type", "1").setSource("f", 2).setVersion(2).get(); // delete is still in cache this should work & set version to 3
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.gc_deletes", 0)
+ ).get();
+
+ client().prepareDelete("test", "type", "1").get(); // sets version to 4
+ Thread.sleep(300); // wait for the cache time to pass. TODO: this needs a better solution; to be discussed.
+ assertThrows(client().prepareIndex("test", "type", "1").setSource("f", 3).setVersion(4), VersionConflictEngineException.class); // the delete tombstone should no longer be in the cache
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java b/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java
new file mode 100644
index 0000000..8ce4ac3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.state;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+@ClusterScope(scope=Scope.TEST, numNodes=2)
+public class CloseIndexDisableCloseAllTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ // Multiple tests are combined into one because the cluster scope is TEST:
+ // transient cluster settings cannot be cleared, so each test needs a fresh cluster.
+ public void testCloseAllRequiresName() {
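+ // with destructive_requires_name set, destructive operations such as close must name
+ // indices explicitly; "_all" and wildcard patterns are rejected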
+ Settings clusterSettings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, true)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(clusterSettings));
+ createIndex("test1", "test2", "test3");
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ // Close all explicitly
+ try {
+ client().admin().indices().prepareClose("_all").execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ // Close all via wildcard
+ try {
+ client().admin().indices().prepareClose("*").execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ // Close via prefix wildcard
+ try {
+ client().admin().indices().prepareClose("test*").execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ // Close all with an exclusion
+ try {
+ client().admin().indices().prepareClose("*", "-test1").execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ // Close all with an exclusion and a re-inclusion
+ try {
+ client().admin().indices().prepareClose("*", "-test1", "+test1").execute().actionGet();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test3", "test2").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test2", "test3");
+ }
+
+ private void assertIndexIsClosed(String... indices) {
+ checkIndexState(IndexMetaData.State.CLOSE, indices);
+ }
+
+ private void checkIndexState(IndexMetaData.State state, String... indices) {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ for (String index : indices) {
+ IndexMetaData indexMetaData = clusterStateResponse.getState().metaData().indices().get(index);
+ assertThat(indexMetaData, notNullValue());
+ assertThat(indexMetaData.getState(), equalTo(state));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java b/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java
new file mode 100644
index 0000000..01f144a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java
@@ -0,0 +1,291 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.state;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class OpenCloseIndexTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleCloseOpen() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test1").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1");
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testSimpleCloseMissingIndex() {
+ Client client = client();
+ client.admin().indices().prepareClose("test1").execute().actionGet();
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testSimpleOpenMissingIndex() {
+ Client client = client();
+ client.admin().indices().prepareOpen("test1").execute().actionGet();
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testCloseOneMissingIndex() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ client.admin().indices().prepareClose("test1", "test2").execute().actionGet();
+ }
+
+ @Test
+ public void testCloseOneMissingIndexIgnoreMissing() {
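+ // lenient indices options ignore missing indices instead of failing the whole request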
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test1", "test2")
+ .setIndicesOptions(IndicesOptions.lenient()).execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testOpenOneMissingIndex() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ client.admin().indices().prepareOpen("test1", "test2").execute().actionGet();
+ }
+
+ @Test
+ public void testOpenOneMissingIndexIgnoreMissing() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1", "test2")
+ .setIndicesOptions(IndicesOptions.lenient()).execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1");
+ }
+
+ @Test
+ public void testCloseOpenMultipleIndices() {
+ Client client = client();
+ createIndex("test1", "test2", "test3");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse1 = client.admin().indices().prepareClose("test1").execute().actionGet();
+ assertThat(closeIndexResponse1.isAcknowledged(), equalTo(true));
+ CloseIndexResponse closeIndexResponse2 = client.admin().indices().prepareClose("test2").execute().actionGet();
+ assertThat(closeIndexResponse2.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2");
+ assertIndexIsOpened("test3");
+
+ OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").execute().actionGet();
+ assertThat(openIndexResponse1.isAcknowledged(), equalTo(true));
+ OpenIndexResponse openIndexResponse2 = client.admin().indices().prepareOpen("test2").execute().actionGet();
+ assertThat(openIndexResponse2.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2", "test3");
+ }
+
+ @Test
+ public void testCloseOpenWildcard() {
+ Client client = client();
+ createIndex("test1", "test2", "a");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test*").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2");
+ assertIndexIsOpened("a");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test*").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2", "a");
+ }
+
+ @Test
+ public void testCloseOpenAll() {
+ Client client = client();
+ createIndex("test1", "test2", "test3");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("_all").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2", "test3");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("_all").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2", "test3");
+ }
+
+ @Test
+ public void testCloseOpenAllWildcard() {
+ Client client = client();
+ createIndex("test1", "test2", "test3");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("*").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2", "test3");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("*").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2", "test3");
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testCloseNoIndex() {
+ Client client = client();
+ client.admin().indices().prepareClose().execute().actionGet();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testCloseNullIndex() {
+ Client client = client();
+ client.admin().indices().prepareClose(null).execute().actionGet();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testOpenNoIndex() {
+ Client client = client();
+ client.admin().indices().prepareOpen().execute().actionGet();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testOpenNullIndex() {
+ Client client = client();
+ client.admin().indices().prepareOpen(null).execute().actionGet();
+ }
+
+ @Test
+ public void testOpenAlreadyOpenedIndex() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ // no problem if we try to open an index that's already open
+ OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").execute().actionGet();
+ assertThat(openIndexResponse1.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1");
+ }
+
+ @Test
+ public void testCloseAlreadyClosedIndex() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ //closing the index
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test1").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+
+ // no problem if we try to close an index that's already closed
+ closeIndexResponse = client.admin().indices().prepareClose("test1").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+ }
+
+ @Test
+ public void testSimpleCloseOpenAlias() {
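+ // close and open resolve aliases to their concrete indices, so operating on the
+ // alias affects the underlying index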
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ IndicesAliasesResponse aliasesResponse = client.admin().indices().prepareAliases().addAlias("test1", "test1-alias").execute().actionGet();
+ assertThat(aliasesResponse.isAcknowledged(), equalTo(true));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test1-alias").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1-alias").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1");
+ }
+
+ @Test
+ public void testCloseOpenAliasMultipleIndices() {
+ Client client = client();
+ createIndex("test1", "test2");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ IndicesAliasesResponse aliasesResponse1 = client.admin().indices().prepareAliases().addAlias("test1", "test-alias").execute().actionGet();
+ assertThat(aliasesResponse1.isAcknowledged(), equalTo(true));
+ IndicesAliasesResponse aliasesResponse2 = client.admin().indices().prepareAliases().addAlias("test2", "test-alias").execute().actionGet();
+ assertThat(aliasesResponse2.isAcknowledged(), equalTo(true));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test-alias").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test-alias").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2");
+ }
+
+ private void assertIndexIsOpened(String... indices) {
+ checkIndexState(IndexMetaData.State.OPEN, indices);
+ }
+
+ private void assertIndexIsClosed(String... indices) {
+ checkIndexState(IndexMetaData.State.CLOSE, indices);
+ }
+
+ private void checkIndexState(IndexMetaData.State expectedState, String... indices) {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ for (String index : indices) {
+ IndexMetaData indexMetaData = clusterStateResponse.getState().metaData().indices().get(index);
+ assertThat(indexMetaData, notNullValue());
+ assertThat(indexMetaData.getState(), equalTo(expectedState));
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java b/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java
new file mode 100644
index 0000000..2eb4b27
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.state;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.SettingsException;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class SimpleIndexStateTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(SimpleIndexStateTests.class);
+
+ @Test
+ public void testSimpleOpenClose() {
+ logger.info("--> creating test index");
+ createIndex("test");
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(5));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+
+ logger.info("--> closing test index...");
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").get();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+
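+ // a closed index keeps its metadata in the cluster state but drops out of the routing table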
+ stateResponse = client().admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> testing indices status api...");
+ IndicesStatusResponse indicesStatusResponse = client().admin().indices().prepareStatus().get();
+ assertThat(indicesStatusResponse.getIndices().size(), equalTo(0));
+
+ logger.info("--> trying to index into a closed index ...");
+ try {
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+
+ logger.info("--> opening index...");
+ OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen("test").get();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ stateResponse = client().admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(5));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ }
+
+ @Test
+ public void testFastCloseAfterCreateDoesNotClose() {
+ logger.info("--> creating test index that cannot be allocated");
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.routing.allocation.include.tag", "no_such_node")
+ .put("index.number_of_replicas", 1).build()).get();
+
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth("test").setWaitForNodes(">=2").get();
+ assertThat(health.isTimedOut(), equalTo(false));
+ assertThat(health.getStatus(), equalTo(ClusterHealthStatus.RED));
+
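+ // closing an index whose primary shards were never allocated is rejected
+ // (presumably so a close cannot mask an index that never became usable)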
+ try {
+ client().admin().indices().prepareClose("test").get();
+ fail("Exception should have been thrown");
+ } catch(IndexPrimaryShardNotAllocatedException e) {
+ // expected
+ }
+
+ logger.info("--> updating test index settings to allow allocation");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.routing.allocation.include.tag", "").build()).get();
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(5));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ }
+
+ @Test
+ public void testConsistencyAfterIndexCreationFailure() {
+
+ logger.info("--> deleting test index....");
+ try {
+ client().admin().indices().prepareDelete("test").get();
+ } catch (IndexMissingException ex) {
+ // Ignore
+ }
+
+ logger.info("--> creating test index with invalid settings ");
+ try {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("number_of_shards", "bad")).get();
+ fail("expected SettingsException for non-numeric number_of_shards");
+ } catch (SettingsException ex) {
+ // Expected
+ }
+
+ logger.info("--> creating test index with valid settings ");
+ CreateIndexResponse response = client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("number_of_shards", 1)).get();
+ assertThat(response.isAcknowledged(), equalTo(true));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/stats/SimpleIndexStatsTests.java b/src/test/java/org/elasticsearch/indices/stats/SimpleIndexStatsTests.java
new file mode 100644
index 0000000..e09928c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/stats/SimpleIndexStatsTests.java
@@ -0,0 +1,425 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.stats;
+
+import org.apache.lucene.util.Version;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.stats.CommonStats;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Random;
+
+import static org.hamcrest.Matchers.*;
+
+/**
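+ * Integration tests for the indices stats API: per-index and per-type counters,
+ * stats flags, and serialization of CommonStatsFlags.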
+ *
+ */
+@ClusterScope(scope = Scope.SUITE, numNodes = 2)
+public class SimpleIndexStatsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleStats() throws Exception {
+ // rely on 1 replica for this test
+ createIndex("test1");
+ createIndex("test2");
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ client().prepareIndex("test1", "type1", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test1", "type2", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test2", "type", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(stats.getPrimaries().getDocs().getCount(), equalTo(3l));
+ assertThat(stats.getTotal().getDocs().getCount(), equalTo(6l));
+ assertThat(stats.getPrimaries().getIndexing().getTotal().getIndexCount(), equalTo(3l));
+ assertThat(stats.getTotal().getIndexing().getTotal().getIndexCount(), equalTo(6l));
+ assertThat(stats.getTotal().getStore(), notNullValue());
+ assertThat(stats.getTotal().getMerge(), notNullValue());
+ assertThat(stats.getTotal().getFlush(), notNullValue());
+ assertThat(stats.getTotal().getRefresh(), notNullValue());
+
+ assertThat(stats.getIndex("test1").getPrimaries().getDocs().getCount(), equalTo(2l));
+ assertThat(stats.getIndex("test1").getTotal().getDocs().getCount(), equalTo(4l));
+ assertThat(stats.getIndex("test1").getPrimaries().getStore(), notNullValue());
+ assertThat(stats.getIndex("test1").getPrimaries().getMerge(), notNullValue());
+ assertThat(stats.getIndex("test1").getPrimaries().getFlush(), notNullValue());
+ assertThat(stats.getIndex("test1").getPrimaries().getRefresh(), notNullValue());
+
+ assertThat(stats.getIndex("test2").getPrimaries().getDocs().getCount(), equalTo(1l));
+ assertThat(stats.getIndex("test2").getTotal().getDocs().getCount(), equalTo(2l));
+
+ // make sure that number of requests in progress is 0
+ assertThat(stats.getIndex("test1").getTotal().getIndexing().getTotal().getIndexCurrent(), equalTo(0l));
+ assertThat(stats.getIndex("test1").getTotal().getIndexing().getTotal().getDeleteCurrent(), equalTo(0l));
+ assertThat(stats.getIndex("test1").getTotal().getSearch().getTotal().getFetchCurrent(), equalTo(0l));
+ assertThat(stats.getIndex("test1").getTotal().getSearch().getTotal().getQueryCurrent(), equalTo(0l));
+
+ // check flags
+ stats = client().admin().indices().prepareStats().clear()
+ .setFlush(true)
+ .setRefresh(true)
+ .setMerge(true)
+ .execute().actionGet();
+
+ assertThat(stats.getTotal().getDocs(), nullValue());
+ assertThat(stats.getTotal().getStore(), nullValue());
+ assertThat(stats.getTotal().getIndexing(), nullValue());
+ assertThat(stats.getTotal().getMerge(), notNullValue());
+ assertThat(stats.getTotal().getFlush(), notNullValue());
+ assertThat(stats.getTotal().getRefresh(), notNullValue());
+
+ // check types
+ stats = client().admin().indices().prepareStats().setTypes("type1", "type").execute().actionGet();
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getIndexCount(), equalTo(1l));
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type").getIndexCount(), equalTo(1l));
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type2"), nullValue());
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getIndexCurrent(), equalTo(0l));
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getDeleteCurrent(), equalTo(0l));
+
+ assertThat(stats.getTotal().getGet().getCount(), equalTo(0l));
+ // check get
+ GetResponse getResponse = client().prepareGet("test1", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+
+ stats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(stats.getTotal().getGet().getCount(), equalTo(1l));
+ assertThat(stats.getTotal().getGet().getExistsCount(), equalTo(1l));
+ assertThat(stats.getTotal().getGet().getMissingCount(), equalTo(0l));
+
+ // missing get
+ getResponse = client().prepareGet("test1", "type1", "2").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+
+ stats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(stats.getTotal().getGet().getCount(), equalTo(2l));
+ assertThat(stats.getTotal().getGet().getExistsCount(), equalTo(1l));
+ assertThat(stats.getTotal().getGet().getMissingCount(), equalTo(1l));
+
+ // clear all
+ stats = client().admin().indices().prepareStats()
+ .setDocs(false)
+ .setStore(false)
+ .setIndexing(false)
+ .setFlush(true)
+ .setRefresh(true)
+ .setMerge(true)
+ .clear() // reset defaults
+ .execute().actionGet();
+
+ assertThat(stats.getTotal().getDocs(), nullValue());
+ assertThat(stats.getTotal().getStore(), nullValue());
+ assertThat(stats.getTotal().getIndexing(), nullValue());
+ assertThat(stats.getTotal().getGet(), nullValue());
+ assertThat(stats.getTotal().getSearch(), nullValue());
+ }
+
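+ /** After repeated flushes and an optimize down to one segment, merge stats must report at least one completed merge. */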
+ @Test
+ public void testMergeStats() {
+ // rely on 1 replica for this test
+ createIndex("test1");
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ // clear all
+ IndicesStatsResponse stats = client().admin().indices().prepareStats()
+ .setDocs(false)
+ .setStore(false)
+ .setIndexing(false)
+ .setFlush(true)
+ .setRefresh(true)
+ .setMerge(true)
+ .clear() // reset defaults
+ .execute().actionGet();
+
+ assertThat(stats.getTotal().getDocs(), nullValue());
+ assertThat(stats.getTotal().getStore(), nullValue());
+ assertThat(stats.getTotal().getIndexing(), nullValue());
+ assertThat(stats.getTotal().getGet(), nullValue());
+ assertThat(stats.getTotal().getSearch(), nullValue());
+
+ for (int i = 0; i < 20; i++) {
+ client().prepareIndex("test1", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test1", "type2", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ client().admin().indices().prepareOptimize().setWaitForMerge(true).setMaxNumSegments(1).execute().actionGet();
+ stats = client().admin().indices().prepareStats()
+ .setMerge(true)
+ .execute().actionGet();
+
+ assertThat(stats.getTotal().getMerge(), notNullValue());
+ assertThat(stats.getTotal().getMerge().getTotal(), greaterThan(0l));
+ }
+
+ @Test
+ public void testSegmentsStats() {
+ prepareCreate("test1", 2).setSettings("index.number_of_shards", 5, "index.number_of_replicas", 1).get();
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ for (int i = 0; i < 20; i++) {
+ index("test1", "type1", Integer.toString(i), "field", "value");
+ index("test1", "type2", Integer.toString(i), "field", "value");
+ client().admin().indices().prepareFlush().get();
+ }
+ client().admin().indices().prepareOptimize().setWaitForMerge(true).setMaxNumSegments(1).execute().actionGet();
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+
+ assertThat(stats.getTotal().getSegments(), notNullValue());
+ assertThat(stats.getTotal().getSegments().getCount(), equalTo(10l));
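+ // the memory assertion below only runs when the current Lucene version is not 4.6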
+ assumeTrue(org.elasticsearch.Version.CURRENT.luceneVersion != Version.LUCENE_46);
+ assertThat(stats.getTotal().getSegments().getMemoryInBytes(), greaterThan(0l));
+ }
+
+ @Test
+ public void testAllFlags() throws Exception {
+ // rely on 1 replica for this test
+ createIndex("test1");
+ createIndex("test2");
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ client().prepareIndex("test1", "type1", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test1", "type2", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test2", "type", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats();
+ Flag[] values = CommonStatsFlags.Flag.values();
+ for (Flag flag : values) {
+ set(flag, builder, false);
+ }
+
+ IndicesStatsResponse stats = builder.execute().actionGet();
+ for (Flag flag : values) {
+ assertThat(isSet(flag, stats.getPrimaries()), equalTo(false));
+ assertThat(isSet(flag, stats.getTotal()), equalTo(false));
+ }
+
+ for (Flag flag : values) {
+ set(flag, builder, true);
+ }
+ stats = builder.execute().actionGet();
+ for (Flag flag : values) {
+ assertThat(isSet(flag, stats.getPrimaries()), equalTo(true));
+ assertThat(isSet(flag, stats.getTotal()), equalTo(true));
+ }
+ Random random = getRandom();
+ EnumSet<Flag> flags = EnumSet.noneOf(Flag.class);
+ for (Flag flag : values) {
+ if (random.nextBoolean()) {
+ flags.add(flag);
+ }
+ }
+
+
+ for (Flag flag : values) {
+ set(flag, builder, false); // clear all
+ }
+
+ for (Flag flag : flags) { // set the flags
+ set(flag, builder, true);
+ }
+ stats = builder.execute().actionGet();
+ for (Flag flag : flags) { // check the flags
+ assertThat(isSet(flag, stats.getPrimaries()), equalTo(true));
+ assertThat(isSet(flag, stats.getTotal()), equalTo(true));
+ }
+
+ for (Flag flag : EnumSet.complementOf(flags)) { // check the complement
+ assertThat(isSet(flag, stats.getPrimaries()), equalTo(false));
+ assertThat(isSet(flag, stats.getTotal()), equalTo(false));
+ }
+
+ }
+
+ @Test
+ public void testEncodeDecodeCommonStats() throws IOException {
+ CommonStatsFlags flags = new CommonStatsFlags();
+ Flag[] values = CommonStatsFlags.Flag.values();
+ assertThat(flags.anySet(), equalTo(true));
+
+ for (Flag flag : values) {
+ flags.set(flag, false);
+ }
+ assertThat(flags.anySet(), equalTo(false));
+ for (Flag flag : values) {
+ flags.set(flag, true);
+ }
+ assertThat(flags.anySet(), equalTo(true));
+ Random random = getRandom();
+ flags.set(values[random.nextInt(values.length)], false);
+ assertThat(flags.anySet(), equalTo(true));
+
+ {
+ BytesStreamOutput out = new BytesStreamOutput();
+ flags.writeTo(out);
+ out.close();
+ BytesReference bytes = out.bytes();
+ CommonStatsFlags readStats = CommonStatsFlags.readCommonStatsFlags(new BytesStreamInput(bytes));
+ for (Flag flag : values) {
+ assertThat(flags.isSet(flag), equalTo(readStats.isSet(flag)));
+ }
+ }
+
+ {
+ for (Flag flag : values) {
+ flags.set(flag, random.nextBoolean());
+ }
+ BytesStreamOutput out = new BytesStreamOutput();
+ flags.writeTo(out);
+ out.close();
+ BytesReference bytes = out.bytes();
+ CommonStatsFlags readStats = CommonStatsFlags.readCommonStatsFlags(new BytesStreamInput(bytes));
+ for (Flag flag : values) {
+ assertThat(flags.isSet(flag), equalTo(readStats.isSet(flag)));
+ }
+ }
+ }
+
+ @Test
+ public void testFlagOrdinalOrder() {
+ Flag[] flags = new Flag[]{Flag.Store, Flag.Indexing, Flag.Get, Flag.Search, Flag.Merge, Flag.Flush, Flag.Refresh,
+ Flag.FilterCache, Flag.IdCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.Percolate, Flag.Completion, Flag.Segments, Flag.Translog};
+
+ assertThat(flags.length, equalTo(Flag.values().length));
+ for (int i = 0; i < flags.length; i++) {
+ assertThat("ordinal has changed - this breaks the wire protocol. Only append to new values", i, equalTo(flags[i].ordinal()));
+ }
+ }
+
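+ // Maps each stats Flag onto the corresponding setter of the request builder; fails on unknown flags.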
+ private static void set(Flag flag, IndicesStatsRequestBuilder builder, boolean set) {
+ switch (flag) {
+ case Docs:
+ builder.setDocs(set);
+ break;
+ case FieldData:
+ builder.setFieldData(set);
+ break;
+ case FilterCache:
+ builder.setFilterCache(set);
+ break;
+ case Flush:
+ builder.setFlush(set);
+ break;
+ case Get:
+ builder.setGet(set);
+ break;
+ case IdCache:
+ builder.setIdCache(set);
+ break;
+ case Indexing:
+ builder.setIndexing(set);
+ break;
+ case Merge:
+ builder.setMerge(set);
+ break;
+ case Refresh:
+ builder.setRefresh(set);
+ break;
+ case Search:
+ builder.setSearch(set);
+ break;
+ case Store:
+ builder.setStore(set);
+ break;
+ case Warmer:
+ builder.setWarmer(set);
+ break;
+ case Percolate:
+ builder.setPercolate(set);
+ break;
+ case Completion:
+ builder.setCompletion(set);
+ break;
+ case Segments:
+ builder.setSegments(set);
+ break;
+ case Translog:
+ builder.setTranslog(set);
+ break;
+ default:
+ fail("new flag? " + flag);
+ break;
+ }
+ }
+
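+ // A flag counts as set when the corresponding stats section is non-null in the response.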
+ private static boolean isSet(Flag flag, CommonStats response) {
+ switch (flag) {
+ case Docs:
+ return response.getDocs() != null;
+ case FieldData:
+ return response.getFieldData() != null;
+ case FilterCache:
+ return response.getFilterCache() != null;
+ case Flush:
+ return response.getFlush() != null;
+ case Get:
+ return response.getGet() != null;
+ case IdCache:
+ return response.getIdCache() != null;
+ case Indexing:
+ return response.getIndexing() != null;
+ case Merge:
+ return response.getMerge() != null;
+ case Refresh:
+ return response.getRefresh() != null;
+ case Search:
+ return response.getSearch() != null;
+ case Store:
+ return response.getStore() != null;
+ case Warmer:
+ return response.getWarmer() != null;
+ case Percolate:
+ return response.getPercolate() != null;
+ case Completion:
+ return response.getCompletion() != null;
+ case Segments:
+ return response.getSegments() != null;
+ case Translog:
+ return response.getTranslog() != null;
+ default:
+ fail("new flag? " + flag);
+ return false;
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java b/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java
new file mode 100644
index 0000000..e7e409e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.store;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.TestCluster;
+import org.junit.Test;
+
+import java.io.File;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
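+ * Verifies that shard data directories are deleted from nodes that no longer host the shard after relocation.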
+ *
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class IndicesStoreTests extends ElasticsearchIntegrationTest {
+ private static final Settings SETTINGS = settingsBuilder().put("gateway.type", "local").build();
+
+ @Test
+ public void shardsCleanup() throws Exception {
+ final String node_1 = cluster().startNode(SETTINGS);
+ final String node_2 = cluster().startNode(SETTINGS);
+ logger.info("--> creating index [test] with one shard and on replica");
+ client().admin().indices().create(createIndexRequest("test")
+ .settings(settingsBuilder().put("index.numberOfReplicas", 1).put("index.numberOfShards", 1))).actionGet();
+
+ logger.info("--> running cluster_health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+
+
+ logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2");
+ assertThat(shardDirectory(node_1, "test", 0).exists(), equalTo(true));
+ assertThat(shardDirectory(node_2, "test", 0).exists(), equalTo(true));
+
+ logger.info("--> starting node server3");
+ String node_3 = cluster().startNode(SETTINGS);
+
+ logger.info("--> making sure that shard is not allocated on server3");
+ assertThat(waitForShardDeletion(node_3, "test", 0), equalTo(false));
+
+ File server2Shard = shardDirectory(node_2, "test", 0);
+ logger.info("--> stopping node node_2");
+ cluster().stopRandomNode(TestCluster.nameFilter(node_2));
+ assertThat(server2Shard.exists(), equalTo(true));
+
+ logger.info("--> running cluster_health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+
+ logger.info("--> making sure that shard and its replica exist on server1, server2 and server3");
+ assertThat(shardDirectory(node_1, "test", 0).exists(), equalTo(true));
+ assertThat(server2Shard.exists(), equalTo(true));
+ assertThat(shardDirectory(node_3, "test", 0).exists(), equalTo(true));
+
+ logger.info("--> starting node node_4");
+ final String node_4 = cluster().startNode(SETTINGS);
+
+ logger.info("--> running cluster_health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+
+ logger.info("--> making sure that shard and its replica are allocated on server1 and server3 but not on server2");
+ assertThat(shardDirectory(node_1, "test", 0).exists(), equalTo(true));
+ assertThat(shardDirectory(node_3, "test", 0).exists(), equalTo(true));
+ assertThat(waitForShardDeletion(node_4, "test", 0), equalTo(false));
+ }
+
+ private File shardDirectory(String server, String index, int shard) {
+ NodeEnvironment env = cluster().getInstance(NodeEnvironment.class, server);
+ return env.shardLocations(new ShardId(index, shard))[0];
+ }
+
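+ // Polls via awaitBusy until the shard directory disappears (or the wait times out), then reports whether it still exists.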
+ private boolean waitForShardDeletion(final String server, final String index, final int shard) throws InterruptedException {
+ awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ return !shardDirectory(server, index, shard).exists();
+ }
+ });
+ return shardDirectory(server, index, shard).exists();
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/indices/store/SimpleDistributorTests.java b/src/test/java/org/elasticsearch/indices/store/SimpleDistributorTests.java
new file mode 100644
index 0000000..e4ec98d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/store/SimpleDistributorTests.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.store;
+
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.shard.service.InternalIndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Set;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
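+ * Exercises the store distributors across store types and checks the Directory toString
+ * produced by each distributor/store combination.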
+ *
+ */
+public class SimpleDistributorTests extends ElasticsearchIntegrationTest {
+
+ public static final String[] STORE_TYPES = {"fs", "simplefs", "niofs", "mmapfs"};
+
+ @Test
+ public void testAvailableSpaceDetection() {
+ for (String store : STORE_TYPES) {
+ createIndexWithStoreType("test", store, StrictDistributor.class.getCanonicalName());
+ }
+ }
+
+ @Test
+ public void testDirectoryToString() throws IOException {
+ createIndexWithStoreType("test", "niofs", "least_used");
+ String storeString = getStoreDirectory("test", 0).toString();
+ logger.info(storeString);
+ File[] dataPaths = dataPaths();
+ assertThat(storeString.toLowerCase(Locale.ROOT), startsWith("store(least_used[rate_limited(niofs(" + dataPaths[0].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ if (dataPaths.length > 1) {
+ assertThat(storeString.toLowerCase(Locale.ROOT), containsString("), rate_limited(niofs(" + dataPaths[1].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ }
+ assertThat(storeString, endsWith(", type=MERGE, rate=20.0)])"));
+
+ createIndexWithStoreType("test", "niofs", "random");
+ storeString = getStoreDirectory("test", 0).toString();
+ logger.info(storeString);
+ dataPaths = dataPaths();
+ assertThat(storeString.toLowerCase(Locale.ROOT), startsWith("store(random[rate_limited(niofs(" + dataPaths[0].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ if (dataPaths.length > 1) {
+ assertThat(storeString.toLowerCase(Locale.ROOT), containsString("), rate_limited(niofs(" + dataPaths[1].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ }
+ assertThat(storeString, endsWith(", type=MERGE, rate=20.0)])"));
+
+ createIndexWithStoreType("test", "mmapfs", "least_used");
+ storeString = getStoreDirectory("test", 0).toString();
+ logger.info(storeString);
+ dataPaths = dataPaths();
+ assertThat(storeString.toLowerCase(Locale.ROOT), startsWith("store(least_used[rate_limited(mmapfs(" + dataPaths[0].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ if (dataPaths.length > 1) {
+ assertThat(storeString.toLowerCase(Locale.ROOT), containsString("), rate_limited(mmapfs(" + dataPaths[1].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ }
+ assertThat(storeString, endsWith(", type=MERGE, rate=20.0)])"));
+
+ createIndexWithStoreType("test", "simplefs", "least_used");
+ storeString = getStoreDirectory("test", 0).toString();
+ logger.info(storeString);
+ dataPaths = dataPaths();
+ assertThat(storeString.toLowerCase(Locale.ROOT), startsWith("store(least_used[rate_limited(simplefs(" + dataPaths[0].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ if (dataPaths.length > 1) {
+ assertThat(storeString.toLowerCase(Locale.ROOT), containsString("), rate_limited(simplefs(" + dataPaths[1].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ }
+ assertThat(storeString, endsWith(", type=MERGE, rate=20.0)])"));
+
+ createIndexWithStoreType("test", "memory", "least_used");
+ storeString = getStoreDirectory("test", 0).toString();
+ logger.info(storeString);
+ dataPaths = dataPaths();
+ assertThat(storeString, equalTo("store(least_used[byte_buffer])"));
+
+ createIndexWithoutRateLimitingStoreType("test", "niofs", "least_used");
+ storeString = getStoreDirectory("test", 0).toString();
+ logger.info(storeString);
+ dataPaths = dataPaths();
+ assertThat(storeString.toLowerCase(Locale.ROOT), startsWith("store(least_used[niofs(" + dataPaths[0].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ if (dataPaths.length > 1) {
+ assertThat(storeString.toLowerCase(Locale.ROOT), containsString("), niofs(" + dataPaths[1].getAbsolutePath().toLowerCase(Locale.ROOT)));
+ }
+ assertThat(storeString, endsWith(")])"));
+ }
+
+ private void createIndexWithStoreType(String index, String storeType, String distributor) {
+ cluster().wipeIndices(index);
+ client().admin().indices().prepareCreate(index)
+ .setSettings(settingsBuilder()
+ .put("index.store.distributor", distributor)
+ .put("index.store.type", storeType)
+ .put("index.number_of_replicas", 0)
+ .put("index.number_of_shards", 1)
+ )
+ .execute().actionGet();
+ assertThat(client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet().isTimedOut(), equalTo(false));
+ }
+
+ private void createIndexWithoutRateLimitingStoreType(String index, String storeType, String distributor) {
+ cluster().wipeIndices(index);
+ client().admin().indices().prepareCreate(index)
+ .setSettings(settingsBuilder()
+ .put("index.store.distributor", distributor)
+ .put("index.store.type", storeType)
+ .put("index.store.throttle.type", "none")
+ .put("index.number_of_replicas", 0)
+ .put("index.number_of_shards", 1)
+ )
+ .execute().actionGet();
+ assertThat(client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet().isTimedOut(), equalTo(false));
+ }
+
+
+ private File[] dataPaths() {
+ Set<String> nodes = cluster().nodesInclude("test");
+ assertThat(nodes.isEmpty(), equalTo(false));
+ NodeEnvironment env = cluster().getInstance(NodeEnvironment.class, nodes.iterator().next());
+ return env.nodeDataLocations();
+ }
+
+ private Directory getStoreDirectory(String index, int shardId) {
+ Set<String> nodes = cluster().nodesInclude("test");
+ assertThat(nodes.isEmpty(), equalTo(false));
+ IndicesService indicesService = cluster().getInstance(IndicesService.class, nodes.iterator().next());
+ InternalIndexShard indexShard = (InternalIndexShard) (indicesService.indexService(index).shard(shardId));
+ return indexShard.store().directory();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/store/StrictDistributor.java b/src/test/java/org/elasticsearch/indices/store/StrictDistributor.java
new file mode 100644
index 0000000..726e0cb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/store/StrictDistributor.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.store;
+
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.distributor.AbstractDistributor;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.greaterThan;
+
+/**
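+ * Test-only distributor: asserts that every delegate directory reports usable space,
+ * then always returns the primary directory.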
+ *
+ */
+public class StrictDistributor extends AbstractDistributor {
+
+ @Inject
+ public StrictDistributor(DirectoryService directoryService) throws IOException {
+ super(directoryService);
+ }
+
+ @Override
+ public Directory doAny() {
+ for (Directory delegate : delegates) {
+ assertThat(getUsableSpace(delegate), greaterThan(0L));
+ }
+ return primary();
+ }
+
+ @Override
+ public String name() {
+ return "strict";
+ }
+
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java b/src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java
new file mode 100644
index 0000000..ddab5ea
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.template;
+
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import com.google.common.base.Charsets;
+import com.google.common.io.Files;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.is;
+
+/**
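+ * Writes a template file into the node's config/templates directory and verifies
+ * it is applied when new indices are created.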
+ *
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=1)
+public class IndexTemplateFileLoadingTests extends ElasticsearchIntegrationTest {
+
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ ImmutableSettings.Builder settingsBuilder = ImmutableSettings.settingsBuilder();
+ settingsBuilder.put(super.nodeSettings(nodeOrdinal));
+
+ try {
+ File directory = newTempDir(LifecycleScope.SUITE);
+ settingsBuilder.put("path.conf", directory.getPath());
+
+ File templatesDir = new File(directory + File.separator + "templates");
+ templatesDir.mkdir();
+
+ File dst = new File(templatesDir, "template.json");
+ String templatePath = "/org/elasticsearch/indices/template/template" + randomInt(5) + ".json";
+ logger.info("Picking template path [{}]", templatePath);
+ // templates 0-5 vary the settings format: flat 'index.number_of_shards' vs bare 'number_of_shards',
+ // a nested 'index' object, and with or without a wrapping template name
+ String template = Streams.copyToStringFromClasspath(templatePath);
+ Files.write(template, dst, Charsets.UTF_8);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+
+ return settingsBuilder.build();
+ }
+
+ @Test
+ public void testThatLoadingTemplateFromFileWorks() throws Exception {
+ final int iters = atLeast(5);
+ Set<String> indices = new HashSet<String>();
+ for (int i = 0; i < iters; i++) {
+ String indexName = "foo" + randomRealisticUnicodeOfLengthBetween(0, 5);
+ if (indices.contains(indexName)) {
+ continue;
+ }
+ indices.add(indexName);
+ createIndex(indexName);
+ ensureYellow(); // ensuring yellow so the test fails faster if the template cannot be loaded
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().setIndices(indexName).get();
+ assertThat(stateResponse.getState().getMetaData().indices().get(indexName).getNumberOfShards(), is(10));
+ assertThat(stateResponse.getState().getMetaData().indices().get(indexName).getNumberOfReplicas(), is(0));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java
new file mode 100644
index 0000000..3d41e4b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.template;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.*;
+
+/**
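+ * Covers template ordering, the create flag, wildcard get/delete, and how broken
+ * mappings and unknown settings behave.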
+ *
+ */
+public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleIndexTemplateTests() throws Exception {
+ // clean all templates set up by the framework.
+ client().admin().indices().prepareDeleteTemplate("*").get();
+
+ // check that getting all templates returns nothing when no templates are registered.
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), empty());
+
+
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("test*")
+ .setOrder(1)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ // test create param
+ assertThrows(client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("test*")
+ .setCreate(true)
+ .setOrder(1)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .endObject().endObject().endObject())
+ , IndexTemplateAlreadyExistsException.class
+ );
+
+ response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), hasSize(2));
+
+
+ // index something into test_index, will match on both templates
+ client().prepareIndex("test_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefresh(true).execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("test_index")
+ .setQuery(termQuery("field1", "value1"))
+ .addField("field1").addField("field2")
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 1);
+ assertThat(searchResponse.getHits().getAt(0).field("field1").value().toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).field("field2").value().toString(), equalTo("value 2")); // this will still be loaded because of the source feature
+
+ client().prepareIndex("text_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefresh(true).execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ // now only match on one template (template_1)
+ searchResponse = client().prepareSearch("text_index")
+ .setQuery(termQuery("field1", "value1"))
+ .addField("field1").addField("field2")
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("failed search " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ assertHitCount(searchResponse, 1);
+ assertThat(searchResponse.getHits().getAt(0).field("field1").value().toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).field("field2").value().toString(), equalTo("value 2"));
+ }
+
+ @Test
+ public void testDeleteIndexTemplate() throws Exception {
+ final int existingTemplates = admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size();
+ logger.info("--> put template_1 and template_2");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("test*")
+ .setOrder(1)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> explicitly delete template_1");
+ admin().indices().prepareDeleteTemplate("template_1").execute().actionGet();
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(), equalTo(1 + existingTemplates));
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().containsKey("template_2"), equalTo(true));
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().containsKey("template_1"), equalTo(false));
+
+
+ logger.info("--> put template_1 back");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> delete template*");
+ admin().indices().prepareDeleteTemplate("template*").execute().actionGet();
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(), equalTo(existingTemplates));
+
+ logger.info("--> delete * with no templates, make sure we don't get a failure");
+ admin().indices().prepareDeleteTemplate("*").execute().actionGet();
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(), equalTo(0));
+ }
+
+ @Test
+ public void testThatGetIndexTemplatesWorks() throws Exception {
+ logger.info("--> put template_1");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> get template template_1");
+ GetIndexTemplatesResponse getTemplate1Response = client().admin().indices().prepareGetTemplates("template_1").execute().actionGet();
+ assertThat(getTemplate1Response.getIndexTemplates(), hasSize(1));
+ assertThat(getTemplate1Response.getIndexTemplates().get(0), is(notNullValue()));
+ assertThat(getTemplate1Response.getIndexTemplates().get(0).getTemplate(), is("te*"));
+ assertThat(getTemplate1Response.getIndexTemplates().get(0).getOrder(), is(0));
+
+ logger.info("--> get non-existing-template");
+ GetIndexTemplatesResponse getTemplate2Response = client().admin().indices().prepareGetTemplates("non-existing-template").execute().actionGet();
+ assertThat(getTemplate2Response.getIndexTemplates(), hasSize(0));
+ }
+
+ @Test
+ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception {
+ logger.info("--> put template_1");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> put template_2");
+ client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> put template3");
+ client().admin().indices().preparePutTemplate("template3")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> get template template_*");
+ GetIndexTemplatesResponse getTemplate1Response = client().admin().indices().prepareGetTemplates("template_*").execute().actionGet();
+ assertThat(getTemplate1Response.getIndexTemplates(), hasSize(2));
+
+ List<String> templateNames = Lists.newArrayList();
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(0).name());
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(1).name());
+ assertThat(templateNames, containsInAnyOrder("template_1", "template_2"));
+
+ logger.info("--> get all templates");
+ getTemplate1Response = client().admin().indices().prepareGetTemplates("template*").execute().actionGet();
+ assertThat(getTemplate1Response.getIndexTemplates(), hasSize(3));
+
+ templateNames = Lists.newArrayList();
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(0).name());
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(1).name());
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(2).name());
+ assertThat(templateNames, containsInAnyOrder("template_1", "template_2", "template3"));
+
+ logger.info("--> get templates template_1 and template_2");
+ getTemplate1Response = client().admin().indices().prepareGetTemplates("template_1", "template_2").execute().actionGet();
+ assertThat(getTemplate1Response.getIndexTemplates(), hasSize(2));
+
+ templateNames = Lists.newArrayList();
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(0).name());
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(1).name());
+ assertThat(templateNames, containsInAnyOrder("template_1", "template_2"));
+ }
+
+ @Test
+ public void testThatInvalidGetIndexTemplatesFails() throws Exception {
+ logger.info("--> get template null");
+ testExpectActionRequestValidationException(null);
+
+ logger.info("--> get template empty");
+ testExpectActionRequestValidationException("");
+
+ logger.info("--> get template 'a', '', 'c'");
+ testExpectActionRequestValidationException("a", "", "c");
+
+ logger.info("--> get template 'a', null, 'c'");
+ testExpectActionRequestValidationException("a", null, "c");
+ }
+
+ private void testExpectActionRequestValidationException(String... names) {
+ assertThrows(client().admin().indices().prepareGetTemplates(names),
+ ActionRequestValidationException.class,
+ "get template with " + Arrays.toString(names));
+ }
+
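+ /** A template whose mapping cannot be parsed is stored as-is but makes creation of matching indices fail. */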
+ @Test
+ public void testBrokenMapping() throws Exception {
+ // clean all templates set up by the framework.
+ client().admin().indices().prepareDeleteTemplate("*").get();
+
+ // check that getting all templates returns nothing when no templates are registered.
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), empty());
+
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .addMapping("type1", "abcde")
+ .get();
+
+ response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), hasSize(1));
+ assertThat(response.getIndexTemplates().get(0).getMappings().size(), equalTo(1));
+ assertThat(response.getIndexTemplates().get(0).getMappings().get("type1").string(), equalTo("abcde"));
+
+ try {
+ createIndex("test");
+ fail("create index should have failed due to broken index templates mapping");
+ } catch(ElasticsearchParseException e) {
+ // expected: the broken mapping is only parsed when a matching index is created
+ }
+ }
+
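+ /** Unknown settings in a template are stored verbatim and copied to matching indices, where they have no effect. */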
+ @Test
+ public void testInvalidSettings() throws Exception {
+ // clean all templates set up by the framework.
+ client().admin().indices().prepareDeleteTemplate("*").get();
+
+ // check that getting all templates returns nothing when no templates are registered.
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), empty());
+
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setSettings(ImmutableSettings.builder().put("does_not_exist", "test"))
+ .get();
+
+ response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), hasSize(1));
+ assertThat(response.getIndexTemplates().get(0).getSettings().getAsMap().size(), equalTo(1));
+ assertThat(response.getIndexTemplates().get(0).getSettings().get("index.does_not_exist"), equalTo("test"));
+
+ createIndex("test");
+
+ // the unknown setting has no effect but does get stored among the index settings
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getIndexToSettings().get("test").getAsMap().get("index.does_not_exist"), equalTo("test"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/template/template0.json b/src/test/java/org/elasticsearch/indices/template/template0.json
new file mode 100644
index 0000000..3b2ace1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/template0.json
@@ -0,0 +1,7 @@
+{
+ "template" : "foo*",
+ "settings" : {
+ "index.number_of_shards": 10,
+ "index.number_of_replicas": 0
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/template/template1.json b/src/test/java/org/elasticsearch/indices/template/template1.json
new file mode 100644
index 0000000..f918668
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/template1.json
@@ -0,0 +1,7 @@
+{
+ "template" : "foo*",
+ "settings" : {
+ "number_of_shards": 10,
+ "number_of_replicas": 0
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/template/template2.json b/src/test/java/org/elasticsearch/indices/template/template2.json
new file mode 100644
index 0000000..c48169f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/template2.json
@@ -0,0 +1,9 @@
+{
+ "template" : "foo*",
+ "settings" : {
+ "index" : {
+ "number_of_shards": 10,
+ "number_of_replicas": 0
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/template/template3.json b/src/test/java/org/elasticsearch/indices/template/template3.json
new file mode 100644
index 0000000..3114cd6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/template3.json
@@ -0,0 +1,9 @@
+{
+ "mytemplate" : {
+ "template" : "foo*",
+ "settings" : {
+ "index.number_of_shards": 10,
+ "index.number_of_replicas": 0
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/template/template4.json b/src/test/java/org/elasticsearch/indices/template/template4.json
new file mode 100644
index 0000000..674f631
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/template4.json
@@ -0,0 +1,9 @@
+{
+ "mytemplate" : {
+ "template" : "foo*",
+ "settings" : {
+ "number_of_shards": 10,
+ "number_of_replicas": 0
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/template/template5.json b/src/test/java/org/elasticsearch/indices/template/template5.json
new file mode 100644
index 0000000..c8192c2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/template/template5.json
@@ -0,0 +1,11 @@
+{
+ "mytemplate" : {
+ "template" : "foo*",
+ "settings" : {
+ "index" : {
+ "number_of_shards": 10,
+ "number_of_replicas": 0
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/indices/warmer/LocalGatewayIndicesWarmerTests.java b/src/test/java/org/elasticsearch/indices/warmer/LocalGatewayIndicesWarmerTests.java
new file mode 100644
index 0000000..5a056c5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/warmer/LocalGatewayIndicesWarmerTests.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.warmer;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.TestCluster.RestartCallback;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
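+ * Verifies that warmers (including ones defined in index templates) survive a full
+ * restart with the local gateway.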
+ */
+@ClusterScope(numNodes=0, scope=Scope.TEST)
+public class LocalGatewayIndicesWarmerTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(LocalGatewayIndicesWarmerTests.class);
+
+ @Test
+ public void testStatePersistence() throws Exception {
+
+ logger.info("--> starting 1 nodes");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local"));
+
+ logger.info("--> putting two templates");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1))
+ .execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_1")
+ .setSearchRequest(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "value1")))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+ putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_2")
+ .setSearchRequest(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "value2")))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> put template with warmer");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setSource("{\n" +
+ " \"template\" : \"xxx\",\n" +
+ " \"warmers\" : {\n" +
+ " \"warmer_1\" : {\n" +
+ " \"types\" : [],\n" +
+ " \"source\" : {\n" +
+ " \"query\" : {\n" +
+ " \"match_all\" : {}\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}")
+ .execute().actionGet();
+
+ logger.info("--> verify warmers are registered in cluster state");
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(warmersMetaData, Matchers.notNullValue());
+ assertThat(warmersMetaData.entries().size(), equalTo(2));
+
+ IndexWarmersMetaData templateWarmers = clusterState.metaData().templates().get("template_1").custom(IndexWarmersMetaData.TYPE);
+ assertThat(templateWarmers, Matchers.notNullValue());
+ assertThat(templateWarmers.entries().size(), equalTo(1));
+
+ logger.info("--> restarting the node");
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return settingsBuilder().put("gateway.type", "local").build();
+ }
+ });
+
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify warmers are recovered");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexWarmersMetaData recoveredWarmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(recoveredWarmersMetaData.entries().size(), equalTo(warmersMetaData.entries().size()));
+ for (int i = 0; i < warmersMetaData.entries().size(); i++) {
+ assertThat(recoveredWarmersMetaData.entries().get(i).name(), equalTo(warmersMetaData.entries().get(i).name()));
+ assertThat(recoveredWarmersMetaData.entries().get(i).source(), equalTo(warmersMetaData.entries().get(i).source()));
+ }
+
+ logger.info("--> verify warmers in template are recovered");
+ IndexWarmersMetaData recoveredTemplateWarmers = clusterState.metaData().templates().get("template_1").custom(IndexWarmersMetaData.TYPE);
+ assertThat(recoveredTemplateWarmers.entries().size(), equalTo(templateWarmers.entries().size()));
+ for (int i = 0; i < templateWarmers.entries().size(); i++) {
+ assertThat(recoveredTemplateWarmers.entries().get(i).name(), equalTo(templateWarmers.entries().get(i).name()));
+ assertThat(recoveredTemplateWarmers.entries().get(i).source(), equalTo(templateWarmers.entries().get(i).source()));
+ }
+
+ logger.info("--> delete warmer warmer_1");
+ DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("warmer_1").execute().actionGet();
+ assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> verify warmers (delete) are registered in cluster state");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(warmersMetaData, Matchers.notNullValue());
+ assertThat(warmersMetaData.entries().size(), equalTo(1));
+
+ logger.info("--> restarting the node");
+ cluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return settingsBuilder().put("gateway.type", "local").build();
+ }
+ });
+
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify warmers are recovered");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ recoveredWarmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(recoveredWarmersMetaData.entries().size(), equalTo(warmersMetaData.entries().size()));
+ for (int i = 0; i < warmersMetaData.entries().size(); i++) {
+ assertThat(recoveredWarmersMetaData.entries().get(i).name(), equalTo(warmersMetaData.entries().get(i).name()));
+ assertThat(recoveredWarmersMetaData.entries().get(i).source(), equalTo(warmersMetaData.entries().get(i).source()));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java b/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java
new file mode 100644
index 0000000..fc1d7a1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java
@@ -0,0 +1,352 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.warmer;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.segments.IndexSegments;
+import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.elasticsearch.action.admin.indices.segments.ShardSegments;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.engine.Segment;
+import org.elasticsearch.index.mapper.FieldMapper.Loading;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.search.warmer.IndexWarmerMissingException;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Basic integration tests for registering, retrieving, and deleting index warmers.
+ */
+public class SimpleIndicesWarmerTests extends ElasticsearchIntegrationTest {
+
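+ // Registers two warmers and exercises the get-warmers API with index
+ // wildcards, warmer-name wildcards, and type filters.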
+ @Test
+ public void simpleWarmerTests() {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1))
+ .execute().actionGet();
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_1")
+ .setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.termQuery("field", "value1")))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+ putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_2")
+ .setSearchRequest(client().prepareSearch("test").setTypes("a2").setQuery(QueryBuilders.termQuery("field", "value2")))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+
+ GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("tes*")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(2));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(1).name(), equalTo("warmer_2"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_*")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(2));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(1).name(), equalTo("warmer_2"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_1")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_2")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_2"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addTypes("a*").addWarmers("warmer_2")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_2"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addTypes("a1").addWarmers("warmer_2")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(0));
+ }
+
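+ // A warmer declared in an index template is attached to every new index the
+ // template matches.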
+ @Test
+ public void templateWarmer() {
+ client().admin().indices().preparePutTemplate("template_1")
+ .setSource("{\n" +
+ " \"template\" : \"*\",\n" +
+ " \"warmers\" : {\n" +
+ " \"warmer_1\" : {\n" +
+ " \"types\" : [],\n" +
+ " \"source\" : {\n" +
+ " \"query\" : {\n" +
+ " \"match_all\" : {}\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}")
+ .execute().actionGet();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1))
+ .execute().actionGet();
+ ensureGreen();
+
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(warmersMetaData, Matchers.notNullValue());
+ assertThat(warmersMetaData.entries().size(), equalTo(1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+ }
+
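+ // Warmers can also be declared inline in the create-index source.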
+ @Test
+ public void createIndexWarmer() {
+ client().admin().indices().prepareCreate("test")
+ .setSource("{\n" +
+ " \"settings\" : {\n" +
+ " \"index.number_of_shards\" : 1\n" +
+ " },\n" +
+ " \"warmers\" : {\n" +
+ " \"warmer_1\" : {\n" +
+ " \"types\" : [],\n" +
+ " \"source\" : {\n" +
+ " \"query\" : {\n" +
+ " \"match_all\" : {}\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}")
+ .execute().actionGet();
+
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(warmersMetaData, Matchers.notNullValue());
+ assertThat(warmersMetaData.entries().size(), equalTo(1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+ }
+
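+ // Deleting a warmer that was never registered must fail with
+ // IndexWarmerMissingException naming the missing warmer.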
+ @Test
+ public void deleteNonExistentIndexWarmerTest() {
+ createIndex("test");
+
+ try {
+ client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("foo").execute().actionGet(1000);
+ fail("warmer foo should not exist");
+ } catch (IndexWarmerMissingException ex) {
+ assertThat(ex.names()[0], equalTo("foo"));
+ }
+ }
+
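+ // Register a warmer, verify the get-warmers API returns it, then delete it
+ // and verify it is gone.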
+ @Test
+ public void deleteIndexWarmerTest() {
+ createIndex("test");
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .get();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get();
+ assertThat(getWarmersResponse.warmers().size(), equalTo(1));
+ ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next();
+ assertThat(entry.key, equalTo("test"));
+ assertThat(entry.value.size(), equalTo(1));
+ assertThat(entry.value.iterator().next().name(), equalTo("custom_warmer"));
+
+ DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer").get();
+ assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get();
+ assertThat(getWarmersResponse.warmers().size(), equalTo(0));
+ }
+
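+ // After "index.warmer.enabled" is set to false, the warmer-run counter from
+ // the stats API must stop increasing even though new segments are created.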
+ @Test // issue 3246
+ public void ensureThatIndexWarmersCanBeChangedOnRuntime() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1, "index.number_of_replicas", 0))
+ .execute().actionGet();
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ client().prepareIndex("test", "test", "1").setSource("foo", "bar").setRefresh(true).execute().actionGet();
+
+ logger.info("--> Disabling warmers execution");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put("index.warmer.enabled", false)).execute().actionGet();
+
+ long warmerRunsAfterDisabling = getWarmerRuns();
+ assertThat(warmerRunsAfterDisabling, greaterThanOrEqualTo(1L));
+
+ client().prepareIndex("test", "test", "2").setSource("foo2", "bar2").setRefresh(true).execute().actionGet();
+
+ assertThat(getWarmerRuns(), equalTo(warmerRunsAfterDisabling));
+ }
+
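+ // "_all" and "*" must both match every index and every warmer name.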
+ @Test
+ public void gettingAllWarmersUsingAllAndWildcardsShouldWork() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1, "index.number_of_replicas", 0))
+ .execute().actionGet();
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ PutWarmerResponse anotherPutWarmerResponse = client().admin().indices().preparePutWarmer("second_custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(anotherPutWarmerResponse.isAcknowledged(), equalTo(true));
+
+ GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("*").addWarmers("*").get();
+ assertThat(getWarmersResponse.warmers().size(), is(1));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("_all").addWarmers("_all").get();
+ assertThat(getWarmersResponse.warmers().size(), is(1));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("t*").addWarmers("c*").get();
+ assertThat(getWarmersResponse.warmers().size(), is(1));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("custom_warmer", "second_custom_warmer").get();
+ assertThat(getWarmersResponse.warmers().size(), is(1));
+ }
+
+ private long getWarmerRuns() {
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("test").clear().setWarmer(true).execute().actionGet();
+ return indicesStatsResponse.getIndex("test").getPrimaries().warmer.total();
+ }
+
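+ // Sums the in-memory size of every segment of every shard of the given index.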
+ private long getSegmentsMemoryUsage(String idx) {
+ IndicesSegmentResponse response = client().admin().indices().segments(Requests.indicesSegmentsRequest(idx)).actionGet();
+ IndexSegments indicesSegments = response.getIndices().get(idx);
+ long total = 0;
+ for (IndexShardSegments indexShardSegments : indicesSegments) {
+ for (ShardSegments shardSegments : indexShardSegments) {
+ for (Segment segment : shardSegments) {
+ System.out.println("+=" + segment.memoryInBytes + " " + indexShardSegments.getShardId() + " " + shardSegments.getIndex());
+ total += segment.memoryInBytes;
+ }
+ }
+ }
+ return total;
+ }
+
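+ // Each constant creates an index with a different norms-loading strategy:
+ // lazy (the default), eager for the whole index, or eager for a single field.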
+ private enum LoadingMethod {
+ LAZY {
+ @Override
+ void createIndex(String indexName, String type, String fieldName) {
+ client().admin().indices().prepareCreate(indexName).setSettings(ImmutableSettings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.LAZY_VALUE)).execute().actionGet();
+ }
+ },
+ EAGER {
+ @Override
+ void createIndex(String indexName, String type, String fieldName) {
+ client().admin().indices().prepareCreate(indexName).setSettings(ImmutableSettings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.EAGER_VALUE)).execute().actionGet();
+ }
+ @Override
+ boolean isLazy() {
+ return false;
+ }
+ },
+ EAGER_PER_FIELD {
+ @Override
+ void createIndex(String indexName, String type, String fieldName) throws Exception {
+ client().admin().indices().prepareCreate(indexName).setSettings(ImmutableSettings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.LAZY_VALUE)).addMapping(type, JsonXContent.contentBuilder()
+ .startObject(type)
+ .startObject("properties")
+ .startObject(fieldName)
+ .field("type", "string")
+ .startObject("norms")
+ .field("loading", Loading.EAGER_VALUE)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+ }
+ @Override
+ boolean isLazy() {
+ return false;
+ }
+ };
+ private static final Settings SINGLE_SHARD_NO_REPLICA = ImmutableSettings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build();
+ abstract void createIndex(String indexName, String type, String fieldName) throws Exception;
+ boolean isLazy() {
+ return true;
+ }
+ }
+
+ static {
+ assertTrue("remove me when LUCENE-5373 is fixed", Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_46);
+ }
+
+ @Test
+ @Ignore("enable me when LUCENE-5373 is fixed, see assertion above")
+ public void testEagerLoading() throws Exception {
+ for (LoadingMethod method : LoadingMethod.values()) {
+ System.out.println("METHOD " + method);
+ method.createIndex("idx", "t", "foo");
+ client().prepareIndex("idx", "t", "1").setSource("foo", "bar").setRefresh(true).execute().actionGet();
+ long memoryUsage0 = getSegmentsMemoryUsage("idx");
+ // queries load norms if they were not loaded before
+ client().prepareSearch("idx").setQuery(QueryBuilders.matchQuery("foo", "bar")).execute().actionGet();
+ long memoryUsage1 = getSegmentsMemoryUsage("idx");
+ if (method.isLazy()) {
+ assertThat(memoryUsage1, greaterThan(memoryUsage0));
+ } else {
+ assertThat(memoryUsage1, equalTo(memoryUsage0));
+ }
+ cluster().wipeIndices("idx");
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java b/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java
new file mode 100644
index 0000000..4d671bb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.mget;
+
+import org.elasticsearch.action.get.MultiGetItemResponse;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.action.get.MultiGetRequestBuilder;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleMgetTests extends ElasticsearchIntegrationTest {
+
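+ // A failing mget item (here, a missing index) must not fail the other items
+ // in the same request.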
+ @Test
+ public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException {
+ createIndex("test");
+ ensureYellow();
+
+ client().prepareIndex("test", "test", "1").setSource(jsonBuilder().startObject().field("foo", "bar").endObject()).setRefresh(true).execute().actionGet();
+
+ MultiGetResponse mgetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "test", "1"))
+ .add(new MultiGetRequest.Item("nonExistingIndex", "test", "1"))
+ .execute().actionGet();
+ assertThat(mgetResponse.getResponses().length, is(2));
+
+ assertThat(mgetResponse.getResponses()[0].getIndex(), is("test"));
+ assertThat(mgetResponse.getResponses()[0].isFailed(), is(false));
+
+ assertThat(mgetResponse.getResponses()[1].getIndex(), is("nonExistingIndex"));
+ assertThat(mgetResponse.getResponses()[1].isFailed(), is(true));
+ assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), is("[nonExistingIndex] missing"));
+
+ mgetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("nonExistingIndex", "test", "1"))
+ .execute().actionGet();
+ assertThat(mgetResponse.getResponses().length, is(1));
+ assertThat(mgetResponse.getResponses()[0].getIndex(), is("nonExistingIndex"));
+ assertThat(mgetResponse.getResponses()[0].isFailed(), is(true));
+ assertThat(mgetResponse.getResponses()[0].getFailure().getMessage(), is("[nonExistingIndex] missing"));
+
+ }
+
+ @Test
+ public void testThatParentPerDocumentIsSupported() throws Exception {
+ createIndex("test");
+ ensureYellow();
+ client().admin().indices().preparePutMapping("test").setType("test").setSource(jsonBuilder()
+ .startObject()
+ .startObject("test")
+ .startObject("_parent")
+ .field("type", "foo")
+ .endObject()
+ .endObject()
+ .endObject()
+ ).get();
+
+ client().prepareIndex("test", "test", "1").setParent("4").setRefresh(true)
+ .setSource(jsonBuilder().startObject().field("foo", "bar").endObject())
+ .execute().actionGet();
+
+ MultiGetResponse mgetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "test", "1").parent("4"))
+ .add(new MultiGetRequest.Item("test", "test", "1"))
+ .execute().actionGet();
+
+ assertThat(mgetResponse.getResponses().length, is(2));
+ assertThat(mgetResponse.getResponses()[0].isFailed(), is(false));
+ assertThat(mgetResponse.getResponses()[0].getResponse().isExists(), is(true));
+
+ assertThat(mgetResponse.getResponses()[1].isFailed(), is(true));
+ assertThat(mgetResponse.getResponses()[1].getResponse(), nullValue());
+ assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), equalTo("routing is required, but hasn't been specified"));
+ }
+
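+ // Per-item FetchSourceContext controls which parts of _source each mget item
+ // returns, including suppressing _source entirely.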
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testThatSourceFilteringIsSupported() throws Exception {
+ createIndex("test");
+ ensureYellow();
+ BytesReference sourceBytesRef = jsonBuilder().startObject()
+ .field("field", "1", "2")
+ .startObject("included").field("field", "should be seen").field("hidden_field", "should not be seen").endObject()
+ .field("excluded", "should not be seen")
+ .endObject().bytes();
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource(sourceBytesRef).get();
+ }
+
+ MultiGetRequestBuilder request = client().prepareMultiGet();
+ for (int i = 0; i < 100; i++) {
+ if (i % 2 == 0) {
+ request.add(new MultiGetRequest.Item("test", "type", Integer.toString(i)).fetchSourceContext(new FetchSourceContext("included", "*.hidden_field")));
+ } else {
+ request.add(new MultiGetRequest.Item("test", "type", Integer.toString(i)).fetchSourceContext(new FetchSourceContext(false)));
+ }
+ }
+
+ MultiGetResponse response = request.get();
+
+ assertThat(response.getResponses().length, equalTo(100));
+ for (int i = 0; i < 100; i++) {
+ MultiGetItemResponse responseItem = response.getResponses()[i];
+ if (i % 2 == 0) {
+ Map<String, Object> source = responseItem.getResponse().getSourceAsMap();
+ assertThat(source.size(), equalTo(1));
+ assertThat(source, hasKey("included"));
+ assertThat(((Map<String, Object>) source.get("included")).size(), equalTo(1));
+ assertThat(((Map<String, Object>) source.get("included")), hasKey("field"));
+ } else {
+ assertThat(responseItem.getResponse().getSourceAsBytes(), nullValue());
+ }
+ }
+ }
+
+ @Test
+ public void testThatRoutingPerDocumentIsSupported() throws Exception {
+ createIndex("test");
+ ensureYellow();
+
+ client().prepareIndex("test", "test", "1").setRefresh(true).setRouting("bar")
+ .setSource(jsonBuilder().startObject().field("foo", "bar").endObject())
+ .execute().actionGet();
+
+ MultiGetResponse mgetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "test", "1").routing("bar"))
+ .add(new MultiGetRequest.Item("test", "test", "1"))
+ .execute().actionGet();
+
+ assertThat(mgetResponse.getResponses().length, is(2));
+ assertThat(mgetResponse.getResponses()[0].isFailed(), is(false));
+ assertThat(mgetResponse.getResponses()[0].getResponse().isExists(), is(true));
+
+ assertThat(mgetResponse.getResponses()[1].isFailed(), is(false));
+ assertThat(mgetResponse.getResponses()[1].getResponse().isExists(), is(false));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java b/src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java
new file mode 100644
index 0000000..40ad9ee
--- /dev/null
+++ b/src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.mlt;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.index.query.QueryBuilders.moreLikeThisFieldQuery;
+import static org.elasticsearch.index.query.QueryBuilders.moreLikeThisQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Integration tests for the more-like-this API and query.
+ */
+public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleMoreLikeThis() throws Exception {
+ logger.info("Creating index test");
+ assertAcked(prepareCreate("test").addMapping("type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("text").field("type", "string").endObject()
+ .endObject().endObject().endObject()));
+
+ logger.info("Running Cluster Health");
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("Indexing...");
+ client().index(indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("text", "lucene").endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("Running moreLikeThis");
+ SearchResponse mltResponse = client().moreLikeThis(moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet();
+ assertHitCount(mltResponse, 1l);
+ }
+
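+ // A document whose only field is numeric yields no like-terms, so the
+ // more-like-this request matches nothing.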
+ @Test
+ public void testSimpleMoreLikeOnLongField() throws Exception {
+ logger.info("Creating index test");
+ createIndex("test");
+ logger.info("Running Cluster Health");
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("Indexing...");
+ client().index(indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("some_long", 1367484649580l).endObject())).actionGet();
+ client().index(indexRequest("test").type("type2").id("2").source(jsonBuilder().startObject().field("some_long", 0).endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("3").source(jsonBuilder().startObject().field("some_long", -666).endObject())).actionGet();
+
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("Running moreLikeThis");
+ SearchResponse mltResponse = client().moreLikeThis(moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet();
+ assertHitCount(mltResponse, 0l);
+ }
+
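+ // Run against a filtered alias, more-like-this must only return documents
+ // matching the alias filter.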
+ @Test
+ public void testMoreLikeThisWithAliases() throws Exception {
+ logger.info("Creating index test");
+ assertAcked(prepareCreate("test").addMapping("type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("text").field("type", "string").endObject()
+ .endObject().endObject().endObject()));
+ logger.info("Creating aliases alias release");
+ client().admin().indices().aliases(indexAliasesRequest().addAlias("release", termFilter("text", "release"), "test")).actionGet();
+ client().admin().indices().aliases(indexAliasesRequest().addAlias("beta", termFilter("text", "beta"), "test")).actionGet();
+
+ logger.info("Running Cluster Health");
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("Indexing...");
+ client().index(indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("text", "lucene beta").endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("3").source(jsonBuilder().startObject().field("text", "elasticsearch beta").endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("4").source(jsonBuilder().startObject().field("text", "elasticsearch release").endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("Running moreLikeThis on index");
+ SearchResponse mltResponse = client().moreLikeThis(moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet();
+ assertHitCount(mltResponse, 2l);
+
+ logger.info("Running moreLikeThis on beta shard");
+ mltResponse = client().moreLikeThis(moreLikeThisRequest("beta").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet();
+ assertHitCount(mltResponse, 1l);
+ assertThat(mltResponse.getHits().getAt(0).id(), equalTo("3"));
+
+ logger.info("Running moreLikeThis on release shard");
+ mltResponse = client().moreLikeThis(moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1).searchIndices("release")).actionGet();
+ assertHitCount(mltResponse, 1l);
+ assertThat(mltResponse.getHits().getAt(0).id(), equalTo("2"));
+
+ logger.info("Running moreLikeThis on alias with node client");
+ mltResponse = cluster().clientNodeClient().moreLikeThis(moreLikeThisRequest("beta").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet();
+ assertHitCount(mltResponse, 1l);
+ assertThat(mltResponse.getHits().getAt(0).id(), equalTo("3"));
+
+ }
+
+ @Test
+ public void testMoreLikeThisIssue2197() throws Exception {
+ Client client = client();
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("bar")
+ .startObject("properties")
+ .endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("foo").addMapping("bar", mapping).execute().actionGet();
+ client().prepareIndex("foo", "bar", "1")
+ .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("foo").execute().actionGet();
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ SearchResponse searchResponse = client().prepareMoreLikeThis("foo", "bar", "1").execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse, notNullValue());
+ searchResponse = client.prepareMoreLikeThis("foo", "bar", "1").execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse, notNullValue());
+ }
+
+ @Test
+ // See: https://github.com/elasticsearch/elasticsearch/issues/2489
+ public void testMoreLikeWithCustomRouting() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("bar")
+ .startObject("properties")
+ .endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("foo").addMapping("bar", mapping).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("foo", "bar", "1")
+ .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject())
+ .setRouting("2")
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("foo").execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareMoreLikeThis("foo", "bar", "1").setRouting("2").execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse, notNullValue());
+ }
+
+ @Test
+ // See issue: https://github.com/elasticsearch/elasticsearch/issues/3039
+ public void testMoreLikeThisIssueRoutingNotSerialized() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("bar")
+ .startObject("properties")
+ .endObject()
+ .endObject().endObject().string();
+ prepareCreate("foo", 2, ImmutableSettings.builder().put("index.number_of_replicas", 0)
+ .put("index.number_of_shards", 2))
+ .addMapping("bar", mapping)
+ .execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("foo", "bar", "1")
+ .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject())
+ .setRouting("4000")
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("foo").execute().actionGet();
+ SearchResponse searchResponse = client().prepareMoreLikeThis("foo", "bar", "1").setRouting("4000").execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse, notNullValue());
+ }
+
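+ // Numeric fields are unsupported by more-like-this: implicit field lists skip
+ // them, explicit ones fail unless fail_on_unsupported_field is false.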
+ @Test
+ // See issue https://github.com/elasticsearch/elasticsearch/issues/3252
+ public void testNumericField() throws Exception {
+ prepareCreate("test").addMapping("type", jsonBuilder()
+ .startObject().startObject("type")
+ .startObject("properties")
+ .startObject("int_value").field("type", randomNumericType(getRandom())).endObject()
+ .startObject("string_value").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject()).execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("string_value", "lucene index").field("int_value", 1).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", "type", "2")
+ .setSource(jsonBuilder().startObject().field("string_value", "elasticsearch index").field("int_value", 42).endObject())
+ .execute().actionGet();
+
+ refresh();
+
+ // Implicit list of fields -> ignore numeric fields
+ SearchResponse searchResponse = client().prepareMoreLikeThis("test", "type", "1").setMinDocFreq(1).setMinTermFreq(1).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+
+ // Explicit list of fields including numeric fields -> fail
+ assertThrows(client().prepareMoreLikeThis("test", "type", "1").setField("string_value", "int_value"), SearchPhaseExecutionException.class);
+
+ // mlt query with no field -> OK
+ searchResponse = client().prepareSearch().setQuery(moreLikeThisQuery().likeText("index").minTermFreq(1).minDocFreq(1)).execute().actionGet();
+ assertHitCount(searchResponse, 2l);
+
+ // mlt query with string fields
+ searchResponse = client().prepareSearch().setQuery(moreLikeThisQuery("string_value").likeText("index").minTermFreq(1).minDocFreq(1)).execute().actionGet();
+ assertHitCount(searchResponse, 2l);
+
+ // mlt query with at least a numeric field -> fail by default
+ assertThrows(client().prepareSearch().setQuery(moreLikeThisQuery("string_value", "int_value").likeText("index")), SearchPhaseExecutionException.class);
+
+ // mlt query with at least a numeric field -> fail when explicitly requested
+ assertThrows(client().prepareSearch().setQuery(moreLikeThisQuery("string_value", "int_value").likeText("index").failOnUnsupportedField(true)), SearchPhaseExecutionException.class);
+
+ // mlt query with at least a numeric field but fail_on_unsupported_field set to false
+ searchResponse = client().prepareSearch().setQuery(moreLikeThisQuery("string_value", "int_value").likeText("index").minTermFreq(1).minDocFreq(1).failOnUnsupportedField(false)).get();
+ assertHitCount(searchResponse, 2l);
+
+ // mlt field query on a numeric field -> failure by default
+ assertThrows(client().prepareSearch().setQuery(moreLikeThisFieldQuery("int_value").likeText("42").minTermFreq(1).minDocFreq(1)), SearchPhaseExecutionException.class);
+
+ // mlt field query on a numeric field -> failure when explicitly requested
+ assertThrows(client().prepareSearch().setQuery(moreLikeThisFieldQuery("int_value").likeText("42").minTermFreq(1).minDocFreq(1).failOnUnsupportedField(true)),
+ SearchPhaseExecutionException.class);
+
+ // mlt field query on a numeric field but fail_on_unsupported_field set to false
+ searchResponse = client().prepareSearch().setQuery(moreLikeThisFieldQuery("int_value").likeText("42").minTermFreq(1).minDocFreq(1).failOnUnsupportedField(false)).execute().actionGet();
+ assertHitCount(searchResponse, 0l);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java b/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java
new file mode 100644
index 0000000..c286e92
--- /dev/null
+++ b/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java
@@ -0,0 +1,1229 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nested;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.facet.FacetBuilders;
+import org.elasticsearch.search.facet.filter.FilterFacet;
+import org.elasticsearch.search.facet.statistical.StatisticalFacet;
+import org.elasticsearch.search.facet.termsstats.TermsStatsFacet;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleNestedTests extends ElasticsearchIntegrationTest {
+
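+ // Each nested object is indexed as its own hidden Lucene document, so one
+ // parent with two nested objects shows up as three docs in the index status.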
+ @Test
+ public void simpleNested() throws Exception {
+ XContentBuilder builder = jsonBuilder().
+ startObject().
+ field("type1").
+ startObject().
+ field("properties").
+ startObject().
+ field("nested1").
+ startObject().
+ field("type").
+ value("nested").
+ endObject().
+ endObject().
+ endObject().
+ endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", builder));
+ ensureGreen();
+
+ // check that searches work while the index is still empty
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(termQuery("_all", "n_value1_1")).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("nested1.n_field1", "n_value1_1")).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ // flush, so we fetch it from the index (and see that we filter nested docs)
+ flush();
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getSourceAsBytes(), notNullValue());
+
+ // check the numDocs
+ IndicesStatusResponse statusResponse = admin().indices().prepareStatus().get();
+ assertNoFailures(statusResponse);
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(3l));
+
+ // check that _all is working on nested docs
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("_all", "n_value1_1")).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("nested1.n_field1", "n_value1_1")).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ // search for something that matches the nested doc, and see that we don't find the nested doc
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("nested1.n_field1", "n_value1_1")).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ // now, do a nested query
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ // add another doc, one that would match if it was not nested...
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ // flush, so we fetch it from the index (and see that we filter nested docs)
+ flush();
+ statusResponse = client().admin().indices().prepareStatus().get();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(6l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ // filter
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), nestedFilter("nested1",
+ boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ // check with type prefix
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("type1.nested1",
+ boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ // check delete, so all is gone...
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "2").execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+
+ // flush, so we fetch it from the index (and see that we filter nested docs)
+ flush();
+ statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(3l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ @Test
+ public void simpleNestedDeletedByQuery1() throws Exception {
+ simpleNestedDeleteByQuery(3, 0);
+ }
+
+ @Test
+ public void simpleNestedDeletedByQuery2() throws Exception {
+ simpleNestedDeleteByQuery(3, 1);
+ }
+
+ @Test
+ public void simpleNestedDeletedByQuery3() throws Exception {
+ simpleNestedDeleteByQuery(3, 2);
+ }
+
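+ // Deleting a parent by query must also remove its hidden nested documents
+ // (three Lucene docs per parent here).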
+ private void simpleNestedDeleteByQuery(int total, int docToDelete) throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.referesh_interval", -1).build())
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ ensureGreen();
+
+ for (int i = 0; i < total; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ }
+
+ flush();
+ IndicesStatusResponse statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(total * 3l));
+
+ client().prepareDeleteByQuery("test").setQuery(QueryBuilders.idsQuery("type1").ids(Integer.toString(docToDelete))).execute().actionGet();
+ flush();
+ refresh();
+ statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo((total * 3l) - 3));
+
+ for (int i = 0; i < total; i++) {
+ assertThat(client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet().isExists(), equalTo(i != docToDelete));
+ }
+ }
+
+ @Test
+ public void noChildrenNestedDeletedByQuery1() throws Exception {
+ noChildrenNestedDeleteByQuery(3, 0);
+ }
+
+ @Test
+ public void noChildrenNestedDeletedByQuery2() throws Exception {
+ noChildrenNestedDeleteByQuery(3, 1);
+ }
+
+ @Test
+ public void noChildrenNestedDeletedByQuery3() throws Exception {
+ noChildrenNestedDeleteByQuery(3, 2);
+ }
+
+ private void noChildrenNestedDeleteByQuery(long total, int docToDelete) throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.referesh_interval", -1).build())
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < total; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .endObject()).execute().actionGet();
+ }
+
+ flush();
+ refresh();
+
+ IndicesStatusResponse statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(total));
+
+ client().prepareDeleteByQuery("test").setQuery(QueryBuilders.idsQuery("type1").ids(Integer.toString(docToDelete))).execute().actionGet();
+ flush();
+ refresh();
+ statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo((total) - 1));
+
+ for (int i = 0; i < total; i++) {
+ assertThat(client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet().isExists(), equalTo(i != docToDelete));
+ }
+ }
+
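+ // With two levels of nesting, queries must only match value combinations that
+ // occur inside the same nested object at the right level.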
+ @Test
+ public void multiNested() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ // flush, so we fetch it from the index (and see that we filter nested docs)
+ flush();
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ // check the numDocs
+ IndicesStatusResponse statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(7l));
+
+ // do some multi nested queries
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ termQuery("nested1.field1", "1"))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1.nested2",
+ termQuery("nested1.nested2.field2", "2"))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "1")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "1")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "3"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "1")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "4"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "1")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "4")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "4")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+ }
+
+ @Test
+ public void testFacetsSingleShard() throws Exception {
+ testFacets(1);
+ }
+
+ @Test
+ public void testFacetsMultiShards() throws Exception {
+ testFacets(3);
+ }
+
+ private void testFacets(int numberOfShards) throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", numberOfShards))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested")
+ .startObject("properties")
+ .startObject("field2_1").field("type", "string").endObject()
+ .startObject("field2_2").field("type", "long").endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1_1", "1").startArray("nested2").startObject().field("field2_1", "blue").field("field2_2", 5).endObject().startObject().field("field2_1", "yellow").field("field2_2", 3).endObject().endArray().endObject()
+ .startObject().field("field1_1", "4").startArray("nested2").startObject().field("field2_1", "green").field("field2_2", 6).endObject().startObject().field("field2_1", "blue").field("field2_2", 1).endObject().endArray().endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1_1", "2").startArray("nested2").startObject().field("field2_1", "yellow").field("field2_2", 10).endObject().startObject().field("field2_1", "green").field("field2_2", 8).endObject().endArray().endObject()
+ .startObject().field("field1_1", "1").startArray("nested2").startObject().field("field2_1", "blue").field("field2_2", 2).endObject().startObject().field("field2_1", "red").field("field2_2", 12).endObject().endArray().endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addFacet(FacetBuilders.termsStatsFacet("facet1").keyField("nested1.nested2.field2_1").valueField("nested1.nested2.field2_2").nested("nested1.nested2"))
+ .addFacet(FacetBuilders.statisticalFacet("facet2").field("field2_2").nested("nested1.nested2"))
+ .addFacet(FacetBuilders.statisticalFacet("facet2_blue").field("field2_2").nested("nested1.nested2")
+ .facetFilter(boolFilter().must(termFilter("field2_1", "blue"))))
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+
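+        // facet1 groups the 8 nested2 docs by field2_1: blue {5, 1, 2}, yellow {3, 10}, green {6, 8}, red {12}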
+ TermsStatsFacet termsStatsFacet = searchResponse.getFacets().facet("facet1");
+ assertThat(termsStatsFacet.getEntries().size(), equalTo(4));
+ assertThat(termsStatsFacet.getEntries().get(0).getTerm().string(), equalTo("blue"));
+ assertThat(termsStatsFacet.getEntries().get(0).getCount(), equalTo(3l));
+ assertThat(termsStatsFacet.getEntries().get(0).getTotal(), equalTo(8d));
+ assertThat(termsStatsFacet.getEntries().get(1).getTerm().string(), equalTo("yellow"));
+ assertThat(termsStatsFacet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(termsStatsFacet.getEntries().get(1).getTotal(), equalTo(13d));
+ assertThat(termsStatsFacet.getEntries().get(2).getTerm().string(), equalTo("green"));
+ assertThat(termsStatsFacet.getEntries().get(2).getCount(), equalTo(2l));
+ assertThat(termsStatsFacet.getEntries().get(2).getTotal(), equalTo(14d));
+ assertThat(termsStatsFacet.getEntries().get(3).getTerm().string(), equalTo("red"));
+ assertThat(termsStatsFacet.getEntries().get(3).getCount(), equalTo(1l));
+ assertThat(termsStatsFacet.getEntries().get(3).getTotal(), equalTo(12d));
+
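+        // facet2 aggregates all 8 nested2 values (5, 3, 6, 1, 10, 8, 2, 12) -> min 1, max 12, total 47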
+ StatisticalFacet statsFacet = searchResponse.getFacets().facet("facet2");
+ assertThat(statsFacet.getCount(), equalTo(8l));
+ assertThat(statsFacet.getMin(), equalTo(1d));
+ assertThat(statsFacet.getMax(), equalTo(12d));
+ assertThat(statsFacet.getTotal(), equalTo(47d));
+
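+        // facet2_blue only counts the 'blue' values 5, 1 and 2 -> min 1, max 5, total 8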
+ StatisticalFacet blueFacet = searchResponse.getFacets().facet("facet2_blue");
+ assertThat(blueFacet.getCount(), equalTo(3l));
+ assertThat(blueFacet.getMin(), equalTo(1d));
+ assertThat(blueFacet.getMax(), equalTo(5d));
+ assertThat(blueFacet.getTotal(), equalTo(8d));
+
+        // test scoped facets (collector based)
+ searchResponse = client().prepareSearch("test")
+ .setQuery(
+ nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2_1", "blue"))
+ )
+ .addFacet(
+ FacetBuilders.termsStatsFacet("facet1")
+ .keyField("nested1.nested2.field2_1")
+ .valueField("nested1.nested2.field2_2")
+ .nested("nested1.nested2")
+ // Maybe remove the `join` option?
+ // The following also works:
+ // .facetFilter(termFilter("nested1.nested2.field2_1", "blue"))
+ .facetFilter(nestedFilter("nested1.nested2", termFilter("nested1.nested2.field2_1", "blue")).join(false))
+ )
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+
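+        // Both root docs match the query, but the nested facet filter restricts the facet to the three 'blue' nested docs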
+ termsStatsFacet = searchResponse.getFacets().facet("facet1");
+ assertThat(termsStatsFacet.getEntries().size(), equalTo(1));
+ assertThat(termsStatsFacet.getEntries().get(0).getTerm().string(), equalTo("blue"));
+ assertThat(termsStatsFacet.getEntries().get(0).getCount(), equalTo(3l));
+ assertThat(termsStatsFacet.getEntries().get(0).getTotal(), equalTo(8d));
+
+        // test scoped facets (post based)
+ searchResponse = client().prepareSearch("test")
+ .setQuery(
+ nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2_1", "blue"))
+ )
+ .addFacet(
+ FacetBuilders.filterFacet("facet1")
+ .global(true)
+ .filter(rangeFilter("nested1.nested2.field2_2").gte(0).lte(2))
+ .nested("nested1.nested2")
+ .facetFilter(nestedFilter("nested1.nested2", termFilter("nested1.nested2.field2_1", "blue")).join(false))
+ )
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+
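+        // Of the 'blue' nested values (5, 1, 2) only 1 and 2 fall inside the [0, 2] range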
+ FilterFacet filterFacet = searchResponse.getFacets().facet("facet1");
+ assertThat(filterFacet.getCount(), equalTo(2l));
+ assertThat(filterFacet.getName(), equalTo("facet1"));
+ }
+
+ @Test
+    // When IncludeNestedDocsQuery is wrapped in a FilteredQuery, an infinite loop occurs because of a bug in IncludeNestedDocsQuery#advance()
+    // This IncludeNestedDocsQuery also needs to be aware of the filter from the alias
+ public void testDeleteNestedDocsWithAlias() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+                .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.refresh_interval", -1).build())
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().indices().prepareAliases()
+ .addAlias("test", "alias1", FilterBuilders.termFilter("field1", "value1")).execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("field1", "value2")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ flush();
+ refresh();
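+        // 2 root docs, each with 2 nested docs -> 6 Lucene docs in total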
+ IndicesStatusResponse statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(6l));
+
+ client().prepareDeleteByQuery("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ flush();
+ refresh();
+ statusResponse = client().admin().indices().prepareStatus().execute().actionGet();
+
+        // This must be 3; otherwise the nested child docs weren't deleted.
+        // If this is 5, only the parent doc has been removed.
+ assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(3l));
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testExplain() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1")
+ .endObject()
+ .endArray()
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1")).scoreMode("total"))
+ .setExplain(true)
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ Explanation explanation = searchResponse.getHits().hits()[0].explanation();
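+        // Both nested docs match and scoreMode 'total' sums their scores: 1.0 + 1.0 = 2.0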
+ assertThat(explanation.getValue(), equalTo(2f));
+ assertThat(explanation.getDescription(), equalTo("Score based on child doc range from 0 to 1"));
+ // TODO: Enable when changes from BlockJoinQuery#explain are added to Lucene (Most likely version 4.2)
+// assertThat(explanation.getDetails().length, equalTo(2));
+// assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));
+// assertThat(explanation.getDetails()[0].getDescription(), equalTo("Child[0]"));
+// assertThat(explanation.getDetails()[1].getValue(), equalTo(1f));
+// assertThat(explanation.getDetails()[1].getDescription(), equalTo("Child[1]"));
+ }
+
+ @Test
+ public void testSimpleNestedSorting() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("index.refresh_interval", -1)
+ .build()
+ )
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("field1")
+ .field("type", "long")
+ .field("store", "yes")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", 1)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 5)
+ .endObject()
+ .startObject()
+ .field("field1", 4)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("field1", 2)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 1)
+ .endObject()
+ .startObject()
+ .field("field1", 2)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("field1", 3)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 3)
+ .endObject()
+ .startObject()
+ .field("field1", 4)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.ASC))
+ .execute().actionGet();
+
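+        // With ASC order the default nested sort mode is 'min', so each doc sorts by its smallest nested value: 1 (doc 2), 3 (doc 3), 4 (doc 1)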
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("4"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.DESC))
+ .execute().actionGet();
+
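+        // With DESC order the default sort mode is 'max': 5 (doc 1), 4 (doc 3), 2 (doc 2)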
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("5"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("4"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value + 1", "number").setNestedPath("nested1").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("6.0"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("5.0"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("3.0"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value + 1", "number").setNestedPath("nested1").sortMode("sum").order(SortOrder.DESC))
+ .execute().actionGet();
+
+        // Because of sortMode 'sum' the script's +1 is applied to both nested values, so each doc sorts by (a + 1) + (b + 1): doc 1 -> 11, doc 3 -> 9, doc 2 -> 5
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("11.0"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("9.0"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("5.0"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value", "number")
+ .setNestedFilter(rangeFilter("nested1.field1").from(1).to(3))
+ .setNestedPath("nested1").sortMode("avg").order(SortOrder.DESC))
+ .execute().actionGet();
+
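+        // Doc 1 has no nested docs with field1 in [1, 3], so under DESC it sorts first with Double.MAX_VALUE; doc 3 averages to 3.0 (only the value 3 matches), doc 2 to (1 + 2) / 2 = 1.5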
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo(Double.toString(Double.MAX_VALUE)));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("3.0"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("1.5"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value", "string")
+ .setNestedPath("nested1").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("5"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("4"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value", "string")
+ .setNestedPath("nested1").order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("4"));
+
+ try {
+ client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value", "string")
+ .setNestedPath("nested1").sortMode("sum").order(SortOrder.ASC))
+ .execute().actionGet();
+ Assert.fail("SearchPhaseExecutionException should have been thrown");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.getMessage(), containsString("type [string] doesn't support mode [SUM]"));
+ }
+ }
+
+ @Test
+ public void testSimpleNestedSorting_withNestedFilterMissing() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("index.referesh_interval", -1)
+ .build()
+ )
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", 1)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 5)
+ .field("field2", true)
+ .endObject()
+ .startObject()
+ .field("field1", 4)
+ .field("field2", true)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("field1", 2)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 1)
+ .field("field2", true)
+ .endObject()
+ .startObject()
+ .field("field1", 2)
+ .field("field2", true)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+        // Doc 3 has no nested docs matching the nested filter below (field2 is false), so the missing(10) sort value is used for it
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("field1", 3)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 3)
+ .field("field2", false)
+ .endObject()
+ .startObject()
+ .field("field1", 4)
+ .field("field2", false)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").setNestedFilter(termFilter("nested1.field2", true)).missing(10).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("4"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("10"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").setNestedFilter(termFilter("nested1.field2", true)).missing(10).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("5"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("2"));
+ }
+
+ @Test
+ public void testSortNestedWithNestedFilter() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("grand_parent_values").field("type", "long").endObject()
+ .startObject("parent").field("type", "nested")
+ .startObject("properties")
+ .startObject("parent_values").field("type", "long").endObject()
+ .startObject("child").field("type", "nested")
+ .startObject("properties")
+ .startObject("child_values").field("type", "long").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+        // child_values sum: 1 + 6 + (-1) + 5 = 11
+ client().prepareIndex("test", "type1", Integer.toString(1)).setSource(jsonBuilder().startObject()
+ .field("grand_parent_values", 1l)
+ .startObject("parent")
+ .field("filter", false)
+ .field("parent_values", 1l)
+ .startObject("child")
+ .field("filter", true)
+ .field("child_values", 1l)
+ .startObject("child_obj")
+ .field("value", 1l)
+ .endObject()
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 6l)
+ .endObject()
+ .endObject()
+ .startObject("parent")
+ .field("filter", true)
+ .field("parent_values", 2l)
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", -1l)
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 5l)
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+        // child_values sum: 2 + 4 + (-2) + 3 = 7
+ client().prepareIndex("test", "type1", Integer.toString(2)).setSource(jsonBuilder().startObject()
+ .field("grand_parent_values", 2l)
+ .startObject("parent")
+ .field("filter", false)
+ .field("parent_values", 2l)
+ .startObject("child")
+ .field("filter", true)
+ .field("child_values", 2l)
+ .startObject("child_obj")
+ .field("value", 2l)
+ .endObject()
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 4l)
+ .endObject()
+ .endObject()
+ .startObject("parent")
+ .field("parent_values", 3l)
+ .field("filter", true)
+ .startObject("child")
+ .field("child_values", -2l)
+ .field("filter", false)
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 3l)
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+        // child_values sum: 3 + 1 + (-3) + 1 = 2
+ client().prepareIndex("test", "type1", Integer.toString(3)).setSource(jsonBuilder().startObject()
+ .field("grand_parent_values", 3l)
+ .startObject("parent")
+ .field("parent_values", 3l)
+ .field("filter", false)
+ .startObject("child")
+ .field("filter", true)
+ .field("child_values", 3l)
+ .startObject("child_obj")
+ .field("value", 3l)
+ .endObject()
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 1l)
+ .endObject()
+ .endObject()
+ .startObject("parent")
+ .field("parent_values", 4l)
+ .field("filter", true)
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", -3l)
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 1l)
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+        // Without a nested filter: ASC picks each doc's minimum child value, i.e. the negative one (-3, -2, -1)
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("-3"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("-2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("-1"));
+
+ // With nested filter
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(FilterBuilders.termFilter("parent.child.filter", true))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+        // The nested path should be detected automatically; expect the same results as the previous search request
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedFilter(FilterBuilders.termFilter("parent.child.filter", true))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.parent_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(FilterBuilders.termFilter("parent.filter", false))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(FilterBuilders.termFilter("parent.filter", false))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+        // TODO: If we expose ToChildBlockJoinQuery we can filter sort values based on higher-level nested objects
+// assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3"));
+// assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("-3"));
+// assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+// assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("-2"));
+// assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1"));
+// assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("-1"));
+
+        // Check that the closest nested type is resolved for the sort field
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_obj.value")
+ .setNestedFilter(FilterBuilders.termFilter("parent.child.filter", true))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ // Sort mode: sum
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .sortMode("sum")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("7"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("11"));
+
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .sortMode("sum")
+ .order(SortOrder.DESC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("11"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("7"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("2"));
+
+ // Sort mode: sum with filter
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(FilterBuilders.termFilter("parent.child.filter", true))
+ .sortMode("sum")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ // Sort mode: avg
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .sortMode("avg")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
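+        // avg on this long field truncates towards zero: doc 3 -> 2 / 4 = 0, doc 2 -> 7 / 4 = 1, doc 1 -> 11 / 4 = 2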
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("0"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("2"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .sortMode("avg")
+ .order(SortOrder.DESC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("0"));
+
+ // Sort mode: avg with filter
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(FilterBuilders.termFilter("parent.child.filter", true))
+ .sortMode("avg")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+ }
+
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java b/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java
new file mode 100644
index 0000000..8db6fd4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.node.internal;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+public class InternalSettingsPreparerTests extends ElasticsearchTestCase {
+ @Before
+ public void setupSystemProperties() {
+ System.setProperty("es.node.zone", "foo");
+ }
+
+ @After
+ public void cleanupSystemProperties() {
+ System.clearProperty("es.node.zone");
+ }
+
+ @Test
+ public void testIgnoreSystemProperties() {
+ Tuple<Settings, Environment> tuple = InternalSettingsPreparer.prepareSettings(settingsBuilder().put("node.zone", "bar").build(), true);
+        // Should pick up the value from the es.node.zone system property
+ assertThat(tuple.v1().get("node.zone"), equalTo("foo"));
+
+ tuple = InternalSettingsPreparer.prepareSettings(settingsBuilder().put("config.ignore_system_properties", true).put("node.zone", "bar").build(), true);
+        // Should ignore the system property and use the explicitly provided setting
+ assertThat(tuple.v1().get("node.zone"), equalTo("bar"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoTests.java b/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoTests.java
new file mode 100644
index 0000000..6104a7c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoTests.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nodesinfo;
+
+import com.google.common.base.Function;
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import com.google.common.collect.FluentIterable;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
+import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.nodesinfo.plugin.dummy1.TestPlugin;
+import org.elasticsearch.nodesinfo.plugin.dummy2.TestNoVersionPlugin;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.io.File;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.Collections;
+import java.util.List;
+
+import static com.google.common.base.Predicates.and;
+import static com.google.common.base.Predicates.isNull;
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.client.Requests.nodesInfoRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class SimpleNodesInfoTests extends ElasticsearchIntegrationTest {
+
+ static final class Fields {
+ static final String SITE_PLUGIN = "dummy";
+ static final String SITE_PLUGIN_DESCRIPTION = "This is a description for a dummy test site plugin.";
+ static final String SITE_PLUGIN_VERSION = "0.0.7-BOND-SITE";
+ }
+
+
+ @Test
+ public void testNodesInfos() {
+ final String node_1 = cluster().startNode();
+ final String node_2 = cluster().startNode();
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+
+ String server1NodeId = cluster().getInstance(ClusterService.class, node_1).state().nodes().localNodeId();
+ String server2NodeId = cluster().getInstance(ClusterService.class, node_2).state().nodes().localNodeId();
+ logger.info("--> started nodes: " + server1NodeId + " and " + server2NodeId);
+
+ NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet();
+ assertThat(response.getNodes().length, is(2));
+ assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
+ assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest()).actionGet();
+ assertThat(response.getNodes().length, is(2));
+ assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
+ assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest(server1NodeId)).actionGet();
+ assertThat(response.getNodes().length, is(1));
+ assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest(server1NodeId)).actionGet();
+ assertThat(response.getNodes().length, is(1));
+ assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest(server2NodeId)).actionGet();
+ assertThat(response.getNodes().length, is(1));
+ assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest(server2NodeId)).actionGet();
+ assertThat(response.getNodes().length, is(1));
+ assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
+ }
+
+ /**
+ * The use case is to start 4 nodes:
+ * <ul>
+ * <li>1 : no plugin</li>
+ * <li>2 : one site plugin (with an es-plugin.properties file)</li>
+ * <li>3 : one java plugin</li>
+ * <li>4 : one site plugin and 2 java plugins (including the previous one)</li>
+ * </ul>
+ * We test here that the NodesInfo API with the plugins option gives us the right results.
+ * @throws URISyntaxException
+ */
+ @Test
+ public void testNodeInfoPlugin() throws URISyntaxException {
+ // We start four nodes
+ // The first has no plugin
+ String server1NodeId = startNodeWithPlugins(1);
+        // The second has one site plugin with an es-plugin.properties file (description and version)
+ String server2NodeId = startNodeWithPlugins(2);
+ // The third has one java plugin
+        String server3NodeId = startNodeWithPlugins(3, TestPlugin.class.getName());
+        // The fourth has one java plugin and one site plugin
+        String server4NodeId = startNodeWithPlugins(4, TestNoVersionPlugin.class.getName());
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+
+ NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().clear().setPlugin(true).execute().actionGet();
+ logger.info("--> full json answer, status " + response.toString());
+
+ assertNodeContainsPlugins(response, server1NodeId,
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST, // No JVM Plugin
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST);// No Site Plugin
+
+ assertNodeContainsPlugins(response, server2NodeId,
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST, // No JVM Plugin
+ Lists.newArrayList(Fields.SITE_PLUGIN), // Site Plugin
+ Lists.newArrayList(Fields.SITE_PLUGIN_DESCRIPTION),
+ Lists.newArrayList(Fields.SITE_PLUGIN_VERSION));
+
+ assertNodeContainsPlugins(response, server3NodeId,
+ Lists.newArrayList(TestPlugin.Fields.NAME), // JVM Plugin
+ Lists.newArrayList(TestPlugin.Fields.DESCRIPTION),
+ Lists.newArrayList(PluginInfo.VERSION_NOT_AVAILABLE),
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST);// No site Plugin
+
+ assertNodeContainsPlugins(response, server4NodeId,
+ Lists.newArrayList(TestNoVersionPlugin.Fields.NAME), // JVM Plugin
+ Lists.newArrayList(TestNoVersionPlugin.Fields.DESCRIPTION),
+ Lists.newArrayList(PluginInfo.VERSION_NOT_AVAILABLE),
+ Lists.newArrayList(Fields.SITE_PLUGIN, TestNoVersionPlugin.Fields.NAME),// Site Plugin
+ Lists.newArrayList(PluginInfo.DESCRIPTION_NOT_AVAILABLE),
+ Lists.newArrayList(PluginInfo.VERSION_NOT_AVAILABLE));
+ }
+
+ private void assertNodeContainsPlugins(NodesInfoResponse response, String nodeId,
+ List<String> expectedJvmPluginNames,
+ List<String> expectedJvmPluginDescriptions,
+ List<String> expectedJvmVersions,
+ List<String> expectedSitePluginNames,
+ List<String> expectedSitePluginDescriptions,
+ List<String> expectedSiteVersions) {
+
+ assertThat(response.getNodesMap().get(nodeId), notNullValue());
+
+ PluginsInfo plugins = response.getNodesMap().get(nodeId).getPlugins();
+ assertThat(plugins, notNullValue());
+
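+        // Project the PluginInfo list with Guava's FluentIterable: filter by plugin kind, then map to the field under test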
+ List<String> pluginNames = FluentIterable.from(plugins.getInfos()).filter(jvmPluginPredicate).transform(nameFunction).toList();
+ for (String expectedJvmPluginName : expectedJvmPluginNames) {
+ assertThat(pluginNames, hasItem(expectedJvmPluginName));
+ }
+
+ List<String> pluginDescriptions = FluentIterable.from(plugins.getInfos()).filter(jvmPluginPredicate).transform(descriptionFunction).toList();
+ for (String expectedJvmPluginDescription : expectedJvmPluginDescriptions) {
+ assertThat(pluginDescriptions, hasItem(expectedJvmPluginDescription));
+ }
+
+ List<String> jvmPluginVersions = FluentIterable.from(plugins.getInfos()).filter(jvmPluginPredicate).transform(versionFunction).toList();
+ for (String pluginVersion : expectedJvmVersions) {
+ assertThat(jvmPluginVersions, hasItem(pluginVersion));
+ }
+
+ FluentIterable<String> jvmUrls = FluentIterable.from(plugins.getInfos())
+ .filter(and(jvmPluginPredicate, Predicates.not(sitePluginPredicate)))
+ .filter(isNull())
+ .transform(urlFunction);
+ assertThat(Iterables.size(jvmUrls), is(0));
+
+ List<String> sitePluginNames = FluentIterable.from(plugins.getInfos()).filter(sitePluginPredicate).transform(nameFunction).toList();
+ for (String expectedSitePluginName : expectedSitePluginNames) {
+ assertThat(sitePluginNames, hasItem(expectedSitePluginName));
+ }
+
+ List<String> sitePluginDescriptions = FluentIterable.from(plugins.getInfos()).filter(sitePluginPredicate).transform(descriptionFunction).toList();
+ for (String sitePluginDescription : expectedSitePluginDescriptions) {
+ assertThat(sitePluginDescriptions, hasItem(sitePluginDescription));
+ }
+
+ List<String> sitePluginUrls = FluentIterable.from(plugins.getInfos()).filter(sitePluginPredicate).transform(urlFunction).toList();
+ assertThat(sitePluginUrls, not(contains(nullValue())));
+
+
+ List<String> sitePluginVersions = FluentIterable.from(plugins.getInfos()).filter(sitePluginPredicate).transform(versionFunction).toList();
+ for (String pluginVersion : expectedSiteVersions) {
+ assertThat(sitePluginVersions, hasItem(pluginVersion));
+ }
+ }
+
+ private String startNodeWithPlugins(int nodeId, String ... pluginClassNames) throws URISyntaxException {
+ URL resource = SimpleNodesInfoTests.class.getResource("/org/elasticsearch/nodesinfo/node" + Integer.toString(nodeId) + "/");
+ ImmutableSettings.Builder settings = settingsBuilder();
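+        // If a node<N> test resource directory exists, point path.plugins at it so on-disk (site) plugins are picked up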
+ if (resource != null) {
+ settings.put("path.plugins", new File(resource.toURI()).getAbsolutePath());
+ }
+
+ if (pluginClassNames.length > 0) {
+ settings.putArray("plugin.types", pluginClassNames);
+ }
+
+ String nodeName = cluster().startNode(settings);
+
+ // We wait for a Green status
+ client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+
+ String serverNodeId = cluster().getInstance(ClusterService.class, nodeName).state().nodes().localNodeId();
+ logger.debug("--> server {} started" + serverNodeId);
+ return serverNodeId;
+ }
+
+
+ private Predicate<PluginInfo> jvmPluginPredicate = new Predicate<PluginInfo>() {
+ public boolean apply(PluginInfo pluginInfo) {
+ return pluginInfo.isJvm();
+ }
+ };
+
+ private Predicate<PluginInfo> sitePluginPredicate = new Predicate<PluginInfo>() {
+ public boolean apply(PluginInfo pluginInfo) {
+ return pluginInfo.isSite();
+ }
+ };
+
+ private Function<PluginInfo, String> nameFunction = new Function<PluginInfo, String>() {
+ public String apply(PluginInfo pluginInfo) {
+ return pluginInfo.getName();
+ }
+ };
+
+ private Function<PluginInfo, String> descriptionFunction = new Function<PluginInfo, String>() {
+ public String apply(PluginInfo pluginInfo) {
+ return pluginInfo.getDescription();
+ }
+ };
+
+ private Function<PluginInfo, String> urlFunction = new Function<PluginInfo, String>() {
+ public String apply(PluginInfo pluginInfo) {
+ return pluginInfo.getUrl();
+ }
+ };
+
+ private Function<PluginInfo, String> versionFunction = new Function<PluginInfo, String>() {
+ public String apply(PluginInfo pluginInfo) {
+ return pluginInfo.getVersion();
+ }
+ };
+}
diff --git a/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java b/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java
new file mode 100644
index 0000000..274e5e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nodesinfo.plugin.dummy1;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+public class TestPlugin extends AbstractPlugin {
+
+ static final public class Fields {
+ static public final String NAME = "test-plugin";
+ static public final String DESCRIPTION = NAME + " description";
+ }
+
+ @Override
+ public String name() {
+ return Fields.NAME;
+ }
+
+ @Override
+ public String description() {
+ return Fields.DESCRIPTION;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy2/TestNoVersionPlugin.java b/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy2/TestNoVersionPlugin.java
new file mode 100644
index 0000000..58b5ee0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy2/TestNoVersionPlugin.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nodesinfo.plugin.dummy2;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+public class TestNoVersionPlugin extends AbstractPlugin {
+
+ static final public class Fields {
+ static public final String NAME = "test-no-version-plugin";
+ static public final String DESCRIPTION = NAME + " description";
+ }
+
+ @Override
+ public String name() {
+ return Fields.NAME;
+ }
+
+ @Override
+ public String description() {
+ return Fields.DESCRIPTION;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java b/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java
new file mode 100644
index 0000000..d752bde
--- /dev/null
+++ b/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.operateAllIndices;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class DestructiveOperationsIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+    // A single test method keeps the run fast, since the cluster scope is TEST.
+    // The cluster scope is TEST because we can't clear cluster settings between methods.
+ public void testDestructiveOperations() throws Exception {
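+        // The method exercises four destructive operations in sequence (delete index,
+        // close/open index, delete-by-query, delete mapping). Each block first enables
+        // DestructiveOperations.REQUIRES_NAME, verifies that wildcard and _all variants
+        // are rejected, then disables it and verifies the same calls succeed.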
+ Settings settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, true)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ assertAcked(client().admin().indices().prepareCreate("index1").get());
+ assertAcked(client().admin().indices().prepareCreate("1index").get());
+
+ // Should succeed, since no wildcards
+ assertAcked(client().admin().indices().prepareDelete("1index").get());
+
+ try {
+            // Should fail: wildcard expressions are not allowed while destructive operations require an explicit name.
+ client().admin().indices().prepareDelete("i*").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+
+ try {
+ client().admin().indices().prepareDelete("_all").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+
+ settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, false)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ assertAcked(client().admin().indices().prepareDelete("_all").get());
+ assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false));
+
+ // end delete index:
+ // close index:
+ settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, true)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ assertAcked(client().admin().indices().prepareCreate("index1").get());
+ assertAcked(client().admin().indices().prepareCreate("1index").get());
+        ensureYellow(); // wait for primaries to be allocated
+ // Should succeed, since no wildcards
+ assertAcked(client().admin().indices().prepareClose("1index").get());
+
+ try {
+ client().admin().indices().prepareClose("_all").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+ try {
+ assertAcked(client().admin().indices().prepareOpen("_all").get());
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+ try {
+ client().admin().indices().prepareClose("*").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+ try {
+ assertAcked(client().admin().indices().prepareOpen("*").get());
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, false)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+ assertAcked(client().admin().indices().prepareClose("_all").get());
+ assertAcked(client().admin().indices().prepareOpen("_all").get());
+
+ // end close index:
+ client().admin().indices().prepareDelete("_all").get();
+ // delete_by_query:
+ settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, true)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ assertAcked(client().admin().indices().prepareCreate("index1").get());
+ assertAcked(client().admin().indices().prepareCreate("1index").get());
+
+ // Should succeed, since no wildcards
+ client().prepareDeleteByQuery("1index").setQuery(QueryBuilders.matchAllQuery()).get();
+
+ try {
+ client().prepareDeleteByQuery("_all").setQuery(QueryBuilders.matchAllQuery()).get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+
+ try {
+ client().prepareDeleteByQuery().setQuery(QueryBuilders.matchAllQuery()).get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {}
+
+ settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, false)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ client().prepareDeleteByQuery().setQuery(QueryBuilders.matchAllQuery()).get();
+ client().prepareDeleteByQuery("_all").setQuery(QueryBuilders.matchAllQuery()).get();
+
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+ client().prepareDeleteByQuery().setQuery(QueryBuilders.matchAllQuery()).get();
+ // end delete_by_query:
+ client().admin().indices().prepareDelete("_all").get();
+ // delete mapping:
+ settings = ImmutableSettings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, true)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+
+ assertAcked(client().admin().indices().prepareCreate("index1").addMapping("1", "field1", "type=string").get());
+ assertAcked(client().admin().indices().prepareCreate("1index").addMapping("1", "field1", "type=string").get());
+
+ // Should succeed, since no wildcards
+ client().admin().indices().prepareDeleteMapping("1index").setType("1").get();
+ try {
+ client().admin().indices().prepareDeleteMapping("_all").setType("1").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+ try {
+ client().admin().indices().prepareDeleteMapping().setIndices("*").setType("1").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ }
+
+ settings = ImmutableSettings.builder().put(DestructiveOperations.REQUIRES_NAME, false).build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ assertAcked(client().admin().indices().preparePutMapping("1index").setType("1").setSource("field1", "type=string"));
+ assertAcked(client().admin().indices().prepareDeleteMapping().setIndices("*").setType("1"));
+ assertAcked(client().admin().indices().preparePutMapping("1index").setType("1").setSource("field1", "type=string"));
+ assertAcked(client().admin().indices().prepareDeleteMapping("_all").setType("1"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorTests.java b/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorTests.java
new file mode 100644
index 0000000..c769277
--- /dev/null
+++ b/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorTests.java
@@ -0,0 +1,399 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.percolator.PercolatorTests.convertFromTextArray;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+
+/**
+ *
+ */
+public class ConcurrentPercolatorTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleConcurrentPercolator() throws Exception {
+ client().admin().indices().prepareCreate("index").setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .build()
+ ).execute().actionGet();
+ ensureGreen();
+
+ final BytesReference onlyField1 = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", 1)
+ .endObject().endObject().bytes();
+ final BytesReference onlyField2 = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field2", "value")
+ .endObject().endObject().bytes();
+ final BytesReference bothFields = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", 1)
+ .field("field2", "value")
+ .endObject().endObject().bytes();
+
+
+        // We need to index a document / define a mapping, otherwise field1 doesn't get recognized as a number field.
+        // If we don't do this, then the 'test2' percolate query gets parsed as a TermQuery and not a RangeQuery.
+        // The percolate api doesn't parse the doc if no queries have been registered, so it can't lazily create a mapping.
+ client().prepareIndex("index", "type", "1").setSource(XContentFactory.jsonBuilder().startObject()
+ .field("field1", 1)
+ .field("field2", "value")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("index", PercolatorService.TYPE_NAME, "test1")
+ .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field2", "value")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("index", PercolatorService.TYPE_NAME, "test2")
+ .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field1", 1)).endObject())
+ .execute().actionGet();
+
+ final CountDownLatch start = new CountDownLatch(1);
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ final AtomicInteger counts = new AtomicInteger(0);
+ final AtomicReference<Throwable> exceptionHolder = new AtomicReference<Throwable>();
+ Thread[] threads = new Thread[5];
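+        // All five workers share the same counter; whichever thread pushes it past 10000
+        // flips the stop flag for everyone, so roughly 10000 percolate requests in total are
+        // spread across the workers, cycling through the three document variants by count % 3.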
+
+ for (int i = 0; i < threads.length; i++) {
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ start.await();
+ while (!stop.get()) {
+ int count = counts.incrementAndGet();
+                            if (count > 10000) {
+ stop.set(true);
+ }
+ PercolateResponse percolate;
+ if (count % 3 == 0) {
+ percolate = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(bothFields)
+ .execute().actionGet();
+ assertThat(percolate.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(percolate.getMatches(), "index"), arrayContainingInAnyOrder("test1", "test2"));
+ } else if (count % 3 == 1) {
+ percolate = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(onlyField2)
+ .execute().actionGet();
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "index"), arrayContaining("test1"));
+ } else {
+ percolate = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(onlyField1)
+ .execute().actionGet();
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "index"), arrayContaining("test2"));
+ }
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ } catch (Throwable e) {
+ exceptionHolder.set(e);
+ Thread.currentThread().interrupt();
+ }
+ }
+ };
+ threads[i] = new Thread(r);
+ threads[i].start();
+ }
+
+ start.countDown();
+ for (Thread thread : threads) {
+ thread.join();
+ }
+
+ Throwable assertionError = exceptionHolder.get();
+ if (assertionError != null) {
+ assertionError.printStackTrace();
+ }
+ assertThat(assertionError + " should be null", assertionError, nullValue());
+ }
+
+ @Test
+ public void testConcurrentAddingAndPercolating() throws Exception {
+ client().admin().indices().prepareCreate("index").setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .build()
+ ).execute().actionGet();
+ ensureGreen();
+ final int numIndexThreads = 3;
+ final int numPercolateThreads = 6;
+ final int numPercolatorOperationsPerThread = 1000;
+
+ final Set<Throwable> exceptionsHolder = ConcurrentCollections.newConcurrentSet();
+ final CountDownLatch start = new CountDownLatch(1);
+ final AtomicInteger runningPercolateThreads = new AtomicInteger(numPercolateThreads);
+ final AtomicInteger type1 = new AtomicInteger();
+ final AtomicInteger type2 = new AtomicInteger();
+ final AtomicInteger type3 = new AtomicInteger();
+
+ final AtomicInteger idGen = new AtomicInteger();
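+        // type1/type2/type3 count how many queries of each shape have been registered so far.
+        // Percolate threads snapshot the relevant counter *before* issuing a request, so the
+        // observed match count can only be greater than or equal to that snapshot.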
+
+ Thread[] indexThreads = new Thread[numIndexThreads];
+ for (int i = 0; i < numIndexThreads; i++) {
+ final Random rand = new Random(getRandom().nextLong());
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ XContentBuilder onlyField1 = XContentFactory.jsonBuilder().startObject()
+ .field("query", termQuery("field1", "value")).endObject();
+ XContentBuilder onlyField2 = XContentFactory.jsonBuilder().startObject()
+ .field("query", termQuery("field2", "value")).endObject();
+ XContentBuilder field1And2 = XContentFactory.jsonBuilder().startObject()
+ .field("query", boolQuery().must(termQuery("field1", "value")).must(termQuery("field2", "value"))).endObject();
+
+ start.await();
+ while (runningPercolateThreads.get() > 0) {
+ Thread.sleep(100);
+ int x = rand.nextInt(3);
+ String id = Integer.toString(idGen.incrementAndGet());
+ IndexResponse response;
+ switch (x) {
+ case 0:
+ response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
+ .setSource(onlyField1)
+ .execute().actionGet();
+ type1.incrementAndGet();
+ break;
+ case 1:
+ response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
+ .setSource(onlyField2)
+ .execute().actionGet();
+ type2.incrementAndGet();
+ break;
+ case 2:
+ response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
+ .setSource(field1And2)
+ .execute().actionGet();
+ type3.incrementAndGet();
+ break;
+ default:
+ throw new IllegalStateException("Illegal x=" + x);
+ }
+ assertThat(response.getId(), equalTo(id));
+ assertThat(response.getVersion(), equalTo(1l));
+ }
+ } catch (Throwable t) {
+ exceptionsHolder.add(t);
+ logger.error("Error in indexing thread...", t);
+ }
+ }
+ };
+ indexThreads[i] = new Thread(r);
+ indexThreads[i].start();
+ }
+
+ Thread[] percolateThreads = new Thread[numPercolateThreads];
+ for (int i = 0; i < numPercolateThreads; i++) {
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ XContentBuilder onlyField1Doc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value")
+ .endObject().endObject();
+ XContentBuilder onlyField2Doc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field2", "value")
+ .endObject().endObject();
+ XContentBuilder field1AndField2Doc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value")
+ .field("field2", "value")
+ .endObject().endObject();
+ Random random = getRandom();
+ start.await();
+ for (int counter = 0; counter < numPercolatorOperationsPerThread; counter++) {
+ int x = random.nextInt(3);
+ int atLeastExpected;
+ PercolateResponse response;
+ switch (x) {
+ case 0:
+ atLeastExpected = type1.get();
+ response = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(onlyField1Doc).execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
+ break;
+ case 1:
+ atLeastExpected = type2.get();
+ response = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(onlyField2Doc).execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
+ break;
+ case 2:
+ atLeastExpected = type3.get();
+ response = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(field1AndField2Doc).execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
+ break;
+ }
+ }
+ } catch (Throwable t) {
+ exceptionsHolder.add(t);
+ logger.error("Error in percolate thread...", t);
+ } finally {
+ runningPercolateThreads.decrementAndGet();
+ }
+ }
+ };
+ percolateThreads[i] = new Thread(r);
+ percolateThreads[i].start();
+ }
+
+ start.countDown();
+ for (Thread thread : indexThreads) {
+ thread.join();
+ }
+ for (Thread thread : percolateThreads) {
+ thread.join();
+ }
+
+ for (Throwable t : exceptionsHolder) {
+ logger.error("Unexpected exception {}", t.getMessage(), t);
+ }
+ assertThat(exceptionsHolder.isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testConcurrentAddingAndRemovingWhilePercolating() throws Exception {
+ client().admin().indices().prepareCreate("index").setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .build()
+ ).execute().actionGet();
+ ensureGreen();
+ final int numIndexThreads = 3;
+ final int numberPercolateOperation = 100;
+
+ final AtomicReference<Throwable> exceptionHolder = new AtomicReference<Throwable>(null);
+ final AtomicInteger idGen = new AtomicInteger(0);
+ final Set<String> liveIds = ConcurrentCollections.newConcurrentSet();
+ final AtomicBoolean run = new AtomicBoolean(true);
+ Thread[] indexThreads = new Thread[numIndexThreads];
+ final Semaphore semaphore = new Semaphore(numIndexThreads, true);
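+        // The fair semaphore lets the main thread quiesce all indexers by acquiring every
+        // permit before percolating; with no concurrent adds or deletes in flight,
+        // liveIds.size() is an exact match count, not just a lower bound.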
+ for (int i = 0; i < indexThreads.length; i++) {
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject()
+ .field("query", termQuery("field1", "value")).endObject();
+ outer:
+ while (run.get()) {
+ semaphore.acquire();
+ try {
+ if (!liveIds.isEmpty() && getRandom().nextInt(100) < 19) {
+ String id;
+ do {
+ if (liveIds.isEmpty()) {
+ continue outer;
+ }
+ id = Integer.toString(randomInt(idGen.get()));
+ } while (!liveIds.remove(id));
+
+ DeleteResponse response = client().prepareDelete("index", PercolatorService.TYPE_NAME, id)
+ .execute().actionGet();
+ assertThat(response.getId(), equalTo(id));
+ assertThat("doc[" + id + "] should have been deleted, but isn't", response.isFound(), equalTo(true));
+ } else {
+ String id = Integer.toString(idGen.getAndIncrement());
+ IndexResponse response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
+ .setSource(doc)
+ .execute().actionGet();
+ liveIds.add(id);
+ assertThat(response.isCreated(), equalTo(true)); // We only add new docs
+ assertThat(response.getId(), equalTo(id));
+ }
+ } finally {
+ semaphore.release();
+ }
+ }
+ } catch (InterruptedException iex) {
+ logger.error("indexing thread was interrupted...", iex);
+ run.set(false);
+ } catch (Throwable t) {
+ run.set(false);
+ exceptionHolder.set(t);
+ logger.error("Error in indexing thread...", t);
+ }
+ }
+ };
+ indexThreads[i] = new Thread(r);
+ indexThreads[i].start();
+ }
+
+ XContentBuilder percolateDoc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value")
+ .endObject().endObject();
+ for (int counter = 0; counter < numberPercolateOperation; counter++) {
+ Thread.sleep(5);
+ semaphore.acquire(numIndexThreads);
+ try {
+ if (!run.get()) {
+ break;
+ }
+ int atLeastExpected = liveIds.size();
+ PercolateResponse response = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(percolateDoc).execute().actionGet();
+ assertThat(response.getShardFailures(), emptyArray());
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getMatches().length, equalTo(atLeastExpected));
+ } finally {
+ semaphore.release(numIndexThreads);
+ }
+ }
+ run.set(false);
+ for (Thread thread : indexThreads) {
+ thread.join();
+ }
+ assertThat("exceptionHolder should have been empty, but holds: " + exceptionHolder.toString(), exceptionHolder.get(), nullValue());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java b/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java
new file mode 100644
index 0000000..56d2ffa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
+import org.elasticsearch.action.percolate.MultiPercolateResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.percolator.PercolatorTests.convertFromTextArray;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class MultiPercolatorTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBasics() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register a queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+
+ MultiPercolateResponse response = client().prepareMultiPercolate()
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject())))
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject())))
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject())))
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject())))
+                .add(client().preparePercolate() // non-existent doc, so this item becomes an error element
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("5")))
+ .execute().actionGet();
+
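+        // Items come back in the order the requests were added; the fifth item is the
+        // get-based request for the missing doc and carries an error instead of a response.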
+ MultiPercolateResponse.Item item = response.getItems()[0];
+ assertMatchCount(item.response(), 2l);
+ assertThat(item.getResponse().getMatches(), arrayWithSize(2));
+ assertThat(item.errorMessage(), nullValue());
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
+
+ item = response.getItems()[1];
+ assertThat(item.errorMessage(), nullValue());
+
+ assertMatchCount(item.response(), 2l);
+ assertThat(item.getResponse().getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ item = response.getItems()[2];
+ assertThat(item.errorMessage(), nullValue());
+ assertMatchCount(item.response(), 4l);
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
+
+ item = response.getItems()[3];
+ assertThat(item.errorMessage(), nullValue());
+ assertMatchCount(item.response(), 1l);
+ assertThat(item.getResponse().getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContaining("4"));
+
+ item = response.getItems()[4];
+ assertThat(item.getResponse(), nullValue());
+ assertThat(item.errorMessage(), notNullValue());
+ assertThat(item.errorMessage(), containsString("document missing"));
+ }
+
+ @Test
+ public void testExistingDocsOnly() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .build())
+ .execute().actionGet();
+ ensureGreen();
+
+ int numQueries = randomIntBetween(50, 100);
+ logger.info("--> register a queries");
+ for (int i = 0; i < numQueries; i++) {
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ }
+
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("field", "a"))
+ .execute().actionGet();
+
+ MultiPercolateRequestBuilder builder = client().prepareMultiPercolate();
+ int numPercolateRequest = randomIntBetween(50, 100);
+ for (int i = 0; i < numPercolateRequest; i++) {
+ builder.add(
+ client().preparePercolate()
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .setIndices("test").setDocumentType("type"));
+ }
+
+ MultiPercolateResponse response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest));
+ for (MultiPercolateResponse.Item item : response) {
+ assertThat(item.isFailure(), equalTo(false));
+ assertMatchCount(item.response(), numQueries);
+ assertThat(item.getResponse().getMatches().length, equalTo(numQueries));
+ }
+
+        // Non-existent doc
+ builder = client().prepareMultiPercolate();
+ for (int i = 0; i < numPercolateRequest; i++) {
+ builder.add(
+ client().preparePercolate()
+ .setGetRequest(Requests.getRequest("test").type("type").id("2"))
+ .setIndices("test").setDocumentType("type"));
+ }
+
+ response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest));
+ for (MultiPercolateResponse.Item item : response) {
+ assertThat(item.isFailure(), equalTo(true));
+ assertThat(item.errorMessage(), containsString("document missing"));
+ assertThat(item.getResponse(), nullValue());
+ }
+
+ // One existing doc
+ builder = client().prepareMultiPercolate();
+ for (int i = 0; i < numPercolateRequest; i++) {
+ builder.add(
+ client().preparePercolate()
+ .setGetRequest(Requests.getRequest("test").type("type").id("2"))
+ .setIndices("test").setDocumentType("type"));
+ }
+ builder.add(
+ client().preparePercolate()
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .setIndices("test").setDocumentType("type"));
+
+ response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest + 1));
+ assertThat(response.items()[numPercolateRequest].isFailure(), equalTo(false));
+ assertMatchCount(response.items()[numPercolateRequest].response(), numQueries);
+ assertThat(response.items()[numPercolateRequest].getResponse().getMatches().length, equalTo(numQueries));
+ }
+
+ @Test
+ public void testWithDocsOnly() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .build())
+ .execute().actionGet();
+ ensureGreen();
+
+ int numQueries = randomIntBetween(50, 100);
+ logger.info("--> register a queries");
+ for (int i = 0; i < numQueries; i++) {
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ }
+
+ MultiPercolateRequestBuilder builder = client().prepareMultiPercolate();
+ int numPercolateRequest = randomIntBetween(50, 100);
+ for (int i = 0; i < numPercolateRequest; i++) {
+ builder.add(
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject())));
+ }
+
+ MultiPercolateResponse response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest));
+ for (MultiPercolateResponse.Item item : response) {
+ assertThat(item.isFailure(), equalTo(false));
+ assertMatchCount(item.response(), numQueries);
+ assertThat(item.getResponse().getMatches().length, equalTo(numQueries));
+ }
+
+        // All requests carry illegal JSON
+ builder = client().prepareMultiPercolate();
+ for (int i = 0; i < numPercolateRequest; i++) {
+ builder.add(
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource("illegal json"));
+ }
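+        // Unparsable source fails on every shard rather than at the item level, so each
+        // item reports isFailure() == false but carries one shard failure per shard.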
+
+ response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest));
+ for (MultiPercolateResponse.Item item : response) {
+ assertThat(item.isFailure(), equalTo(false));
+ assertThat(item.getResponse().getSuccessfulShards(), equalTo(0));
+ assertThat(item.getResponse().getShardFailures().length, equalTo(2));
+ for (ShardOperationFailedException shardFailure : item.getResponse().getShardFailures()) {
+ assertThat(shardFailure.reason(), containsString("Failed to derive xcontent from"));
+ assertThat(shardFailure.status().getStatus(), equalTo(500));
+ }
+ }
+
+ // one valid request
+ builder = client().prepareMultiPercolate();
+ for (int i = 0; i < numPercolateRequest; i++) {
+ builder.add(
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource("illegal json"));
+ }
+ builder.add(
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject())));
+
+ response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest + 1));
+ assertThat(response.items()[numPercolateRequest].isFailure(), equalTo(false));
+ assertMatchCount(response.items()[numPercolateRequest].response(), numQueries);
+ assertThat(response.items()[numPercolateRequest].getResponse().getMatches().length, equalTo(numQueries));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java b/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java
new file mode 100644
index 0000000..83e25ca
--- /dev/null
+++ b/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.action.percolate.PercolateRequestBuilder;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.facet.FacetBuilders;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;
+import static org.hamcrest.Matchers.arrayWithSize;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class PercolatorFacetsAndAggregationsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ // Just test the integration with facets and aggregations, not the facet and aggregation functionality!
+ public void testFacetsAndAggregations() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ int numQueries = atLeast(250);
+ int numUniqueQueries = between(1, numQueries / 2);
+ String[] values = new String[numUniqueQueries];
+ for (int i = 0; i < values.length; i++) {
+ values[i] = "value" + i;
+ }
+ int[] expectedCount = new int[numUniqueQueries];
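+        // expectedCount[v] tracks how many registered queries match values[v]; a document
+        // containing that value should therefore match exactly expectedCount[v] queries.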
+
+ logger.info("--> registering {} queries", numQueries);
+ for (int i = 0; i < numQueries; i++) {
+ String value = values[i % numUniqueQueries];
+ expectedCount[i % numUniqueQueries]++;
+ QueryBuilder queryBuilder = matchQuery("field1", value);
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject())
+ .execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ for (int i = 0; i < numQueries; i++) {
+ String value = values[i % numUniqueQueries];
+ PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject()));
+
+ boolean useAggs = randomBoolean();
+ if (useAggs) {
+ percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2"));
+ } else {
+                percolateRequestBuilder.addFacet(FacetBuilders.termsFacet("a").field("field2"));
+            }
+
+ if (randomBoolean()) {
+ percolateRequestBuilder.setPercolateQuery(matchAllQuery());
+ }
+ if (randomBoolean()) {
+ percolateRequestBuilder.setScore(true);
+ } else {
+ percolateRequestBuilder.setSortByScore(true).setSize(numQueries);
+ }
+
+ boolean countOnly = randomBoolean();
+ if (countOnly) {
+ percolateRequestBuilder.setOnlyCount(countOnly);
+ }
+
+ PercolateResponse response = percolateRequestBuilder.execute().actionGet();
+ assertMatchCount(response, expectedCount[i % numUniqueQueries]);
+ if (!countOnly) {
+ assertThat(response.getMatches(), arrayWithSize(expectedCount[i % numUniqueQueries]));
+ }
+
+ if (useAggs) {
+ List<Aggregation> aggregations = response.getAggregations().asList();
+ assertThat(aggregations.size(), equalTo(1));
+ assertThat(aggregations.get(0).getName(), equalTo("a"));
+ List<Terms.Bucket> buckets = new ArrayList<Terms.Bucket>(((Terms) aggregations.get(0)).getBuckets());
+ assertThat(buckets.size(), equalTo(1));
+ assertThat(buckets.get(0).getKeyAsText().string(), equalTo("b"));
+ assertThat(buckets.get(0).getDocCount(), equalTo((long) expectedCount[i % values.length]));
+ } else {
+ assertThat(response.getFacets().facets().size(), equalTo(1));
+ assertThat(response.getFacets().facets().get(0).getName(), equalTo("a"));
+ assertThat(((TermsFacet) response.getFacets().facets().get(0)).getEntries().size(), equalTo(1));
+ assertThat(((TermsFacet) response.getFacets().facets().get(0)).getEntries().get(0).getCount(), equalTo(expectedCount[i % values.length]));
+ assertThat(((TermsFacet) response.getFacets().facets().get(0)).getEntries().get(0).getTerm().string(), equalTo("b"));
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/percolator/PercolatorTests.java b/src/test/java/org/elasticsearch/percolator/PercolatorTests.java
new file mode 100644
index 0000000..26efb4f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/percolator/PercolatorTests.java
@@ -0,0 +1,1766 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.percolate.PercolateSourceBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.engine.DocumentMissingException;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.factor.FactorBuilder;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.highlight.HighlightBuilder;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.*;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class PercolatorTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimple1() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Add dummy doc");
+ client().prepareIndex("test", "type", "1").setSource("field", "value").execute().actionGet();
+
+ logger.info("--> register a queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Percolate doc with field1=b");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
+
+ logger.info("--> Percolate doc with field1=c");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ logger.info("--> Percolate doc with field1=b c");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), arrayWithSize(4));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
+
+ logger.info("--> Percolate doc with field1=d");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("4"));
+
+ logger.info("--> Search dummy doc, percolate queries must not be included");
+ SearchResponse searchResponse = client().prepareSearch("test", "test").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
+ assertThat(searchResponse.getHits().getAt(0).type(), equalTo("type"));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+
+ logger.info("--> Percolate non existing doc");
+ try {
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("5"))
+ .execute().actionGet();
+ fail("Exception should have been thrown");
+ } catch (DocumentMissingException e) {
+ }
+ }
+
+ @Test
+ public void testSimple2() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=long"));
+
+ // introduce the doc
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", 1)
+ .field("field2", "value")
+ .endObject().endObject();
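+        // With no queries registered yet, this first percolate should return no matches.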
+
+ PercolateResponse response = client().preparePercolate().setSource(doc)
+ .setIndices("test").setDocumentType("type1")
+ .execute().actionGet();
+ assertMatchCount(response, 0l);
+ assertThat(response.getMatches(), emptyArray());
+
+ // add first query...
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "test1")
+ .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field2", "value")).endObject())
+ .execute().actionGet();
+
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(doc).execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("test1"));
+
+ // add second query...
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "test2")
+ .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field1", 1)).endObject())
+ .execute().actionGet();
+
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(doc)
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("test1", "test2"));
+
+
+ client().prepareDelete("test", PercolatorService.TYPE_NAME, "test2").execute().actionGet();
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(doc).execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("test1"));
+
+        // add a range query (cached)
+ client().prepareIndex("test1", PercolatorService.TYPE_NAME)
+ .setSource(
+ XContentFactory.jsonBuilder().startObject().field("query",
+ constantScoreQuery(FilterBuilders.rangeFilter("field2").from(1).to(5).includeLower(true).setExecution("fielddata"))
+ ).endObject()
+ )
+ .execute().actionGet();
+
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(doc).execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("test1"));
+ }
+
+ @Test
+ public void testRangeFilterThatUsesFD() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=long")
+ .get();
+
+
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(
+ XContentFactory.jsonBuilder().startObject().field("query",
+ constantScoreQuery(FilterBuilders.rangeFilter("field1").from(1).to(5).setExecution("fielddata"))
+ ).endObject()
+ ).get();
+
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setPercolateDoc(PercolateSourceBuilder.docBuilder().setDoc("field1", 3)).get();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("1"));
+ }
+
+ @Test
+ public void testPercolateQueriesWithRouting() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register a queries");
+ for (int i = 1; i <= 100; i++) {
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .setRouting(Integer.toString(i % 2))
+ .execute().actionGet();
+ }
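+        // Routing by i % 2 sends half of the queries to each routing value; the assertions
+        // below rely on the two routing values landing on different shards of the 2-shard
+        // index, so a routed percolate sees 50 queries while an unrouted one sees all 100.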
+
+ logger.info("--> Percolate doc with no routing");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 100l);
+ assertThat(response.getMatches(), arrayWithSize(100));
+
+ logger.info("--> Percolate doc with routing=0");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .setRouting("0")
+ .execute().actionGet();
+ assertMatchCount(response, 50l);
+ assertThat(response.getMatches(), arrayWithSize(50));
+
+ logger.info("--> Percolate doc with routing=1");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .setRouting("1")
+ .execute().actionGet();
+ assertMatchCount(response, 50l);
+ assertThat(response.getMatches(), arrayWithSize(50));
+ }
+
+ @Test
+ public void percolateOnRecreatedIndex() throws Exception {
+ prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("field1", "value1").execute().actionGet();
+ logger.info("--> register a query");
+ client().prepareIndex("my-queries-index", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ cluster().wipeIndices("test");
+ prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("field1", "value1").execute().actionGet();
+ logger.info("--> register a query");
+ client().prepareIndex("my-queries-index", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+ }
+
+ @Test
+ // see #2814
+ public void percolateCustomAnalyzer() throws Exception {
+ Builder builder = ImmutableSettings.builder();
+ builder.put("index.analysis.analyzer.lwhitespacecomma.tokenizer", "whitespacecomma");
+ builder.putArray("index.analysis.analyzer.lwhitespacecomma.filter", "lowercase");
+ builder.put("index.analysis.tokenizer.whitespacecomma.type", "pattern");
+ builder.put("index.analysis.tokenizer.whitespacecomma.pattern", "(,|\\s+)");
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .startObject("properties")
+ .startObject("filingcategory").field("type", "string").field("analyzer", "lwhitespacecomma").endObject()
+ .endObject()
+ .endObject().endObject();
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("doc", mapping)
+ .setSettings(builder.put("index.number_of_shards", 1))
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject()
+ .field("source", "productizer")
+ .field("query", QueryBuilders.constantScoreQuery(QueryBuilders.queryString("filingcategory:s")))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("doc")
+ .setSource(jsonBuilder().startObject()
+ .startObject("doc").field("filingcategory", "s").endObject()
+ .field("query", termQuery("source", "productizer"))
+ .endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+
+ }
+
+ @Test
+ public void createIndexAndThenRegisterPercolator() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)));
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .execute().actionGet();
+
+ refresh();
+ CountResponse countResponse = client().prepareCount()
+ .setQuery(matchAllQuery()).setTypes(PercolatorService.TYPE_NAME)
+ .execute().actionGet();
+ assertThat(countResponse.getCount(), equalTo(1l));
+
+
+ for (int i = 0; i < 10; i++) {
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ }
+
+ for (int i = 0; i < 10; i++) {
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setPreference("_local")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ }
+
+
+ logger.info("--> delete the index");
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ logger.info("--> make sure percolated queries for it have been deleted as well");
+ countResponse = client().prepareCount()
+ .setQuery(matchAllQuery()).setTypes(PercolatorService.TYPE_NAME)
+ .execute().actionGet();
+ assertHitCount(countResponse, 0l);
+ }
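+
+ // Hedged sketch (assumption, not asserted above): a single registered query can
+ // also be unregistered without deleting the whole index, e.g.:
+ //
+ // client().prepareDelete("test", PercolatorService.TYPE_NAME, "kuku")
+ //         .setRefresh(true).execute().actionGet();
+ //
+ // after which the same percolate request should yield zero matches.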
+
+ @Test
+ public void multiplePercolators() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register query 1");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ logger.info("--> register query 2");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "bubu")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "green")
+ .field("query", termQuery("field1", "value2"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("kuku"));
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value2").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("bubu"));
+ }
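+
+ // Illustrative sketch (assumption, not used above): several queries can be
+ // registered in a single round trip via the bulk API, e.g.:
+ //
+ // client().prepareBulk()
+ //         .add(client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ //                 .setSource(jsonBuilder().startObject()
+ //                         .field("query", termQuery("field1", "value1")).endObject()))
+ //         .setRefresh(true)
+ //         .execute().actionGet();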
+
+ @Test
+ public void dynamicAddingRemovingQueries() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register query 1");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("kuku"));
+
+ logger.info("--> register query 2");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "bubu")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "green")
+ .field("query", termQuery("field1", "value2"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value2").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("bubu"));
+
+ logger.info("--> register query 3");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "susu")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "red")
+ .field("query", termQuery("field1", "value2"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateSourceBuilder sourceBuilder = new PercolateSourceBuilder()
+ .setDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "value2").endObject()))
+ .setQueryBuilder(termQuery("color", "red"));
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(sourceBuilder)
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("susu"));
+
+ logger.info("--> deleting query 1");
+ client().prepareDelete("test", PercolatorService.TYPE_NAME, "kuku").setRefresh(true).execute().actionGet();
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").startObject("type1")
+ .field("field1", "value1")
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 0l);
+ assertThat(percolate.getMatches(), emptyArray());
+ }
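+
+ // Hedged sketch (assumption): a registered query is updated by re-indexing a new
+ // source under the same id, e.g. repointing "susu" at a different term:
+ //
+ // client().prepareIndex("test", PercolatorService.TYPE_NAME, "susu")
+ //         .setSource(jsonBuilder().startObject()
+ //                 .field("query", termQuery("field1", "value3")).endObject())
+ //         .setRefresh(true).execute().actionGet();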
+
+ @Test
+ public void percolateWithSizeField() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_size").field("enabled", true).field("store", "yes").endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", mapping)
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ logger.info("--> percolate a document");
+ PercolateResponse percolate = client().preparePercolate().setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject()
+ .startObject("doc")
+ .field("field1", "value1")
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("kuku"));
+ }
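+
+ // Note (assumption, not asserted above): with the _size field enabled and stored,
+ // the byte length of the original _source is kept alongside each doc and could be
+ // read back from a regular get, e.g. for a hypothetical doc id:
+ //
+ // client().prepareGet("test", "type1", "some-id").setFields("_size")
+ //         .execute().actionGet();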
+
+ @Test
+ public void testPercolateStatistics() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> First percolate request");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field", "val").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("1"));
+
+ IndicesStatsResponse indicesResponse = client().admin().indices().prepareStats("test").execute().actionGet();
+ assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo(5l)); // We have 5 partitions
+ assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0l));
+ assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo(2l)); // The one registered query is counted on both the primary and the replica
+ assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), equalTo(-1l));
+
+ NodesStatsResponse nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet();
+ long percolateCount = 0;
+ for (NodeStats nodeStats : nodesResponse) {
+ percolateCount += nodeStats.getIndices().getPercolate().getCount();
+ }
+ assertThat(percolateCount, equalTo(5l)); // We have 5 partitions
+
+ logger.info("--> Second percolate request");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field", "val").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("1"));
+
+ indicesResponse = client().admin().indices().prepareStats().setPercolate(true).execute().actionGet();
+ assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo(10l));
+ assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0l));
+ assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo(2l));
+ assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), equalTo(-1l));
+
+ percolateCount = 0;
+ nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet();
+ for (NodeStats nodeStats : nodesResponse) {
+ percolateCount += nodeStats.getIndices().getPercolate().getCount();
+ }
+ assertThat(percolateCount, equalTo(10l));
+
+ // We might be faster than 1ms, so run up to 1000 times until we have spent 1ms or more on percolating
+ boolean moreThanOneMs = false;
+ int counter = 3; // We already ran twice.
+ do {
+ indicesResponse = client().admin().indices().prepareStats("test").execute().actionGet();
+ if (indicesResponse.getTotal().getPercolate().getTimeInMillis() > 0) {
+ moreThanOneMs = true;
+ break;
+ }
+
+ logger.info("--> percolate request #{}", counter);
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field", "val").endObject().endObject())
+ .execute().actionGet();
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("1"));
+ } while (++counter <= 1000);
+ assertTrue("Something is off, we should have spent at least 1ms on percolating...", moreThanOneMs);
+
+ long percolateSumTime = 0;
+ nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet();
+ for (NodeStats nodeStats : nodesResponse) {
+ percolateCount += nodeStats.getIndices().getPercolate().getCount();
+ percolateSumTime += nodeStats.getIndices().getPercolate().getTimeInMillis();
+ }
+ assertThat(percolateSumTime, greaterThan(0l));
+ }
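+
+ // Worked numbers for the assertions above (assuming the default of 5 primary
+ // shards): a percolate request is executed once per shard, so one request
+ // increments the count by 5 and two requests leave it at 2 * 5 = 10, while
+ // numQueries stays at 2 because the single registered query lives on a primary
+ // shard and on its replica.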
+
+ @Test
+ public void testPercolatingExistingDocs() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Adding docs");
+ client().prepareIndex("test", "type", "1").setSource("field1", "b").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field1", "b c").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field1", "d").execute().actionGet();
+
+ logger.info("--> register queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Percolate existing doc with id 1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
+
+ logger.info("--> Percolate existing doc with id 2");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ logger.info("--> Percolate existing doc with id 3");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("3"))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), arrayWithSize(4));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
+
+ logger.info("--> Percolate existing doc with id 4");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("4"))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("4"));
+
+ logger.info("--> Search normal docs, percolate queries must not be included");
+ SearchResponse searchResponse = client().prepareSearch("test").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4L));
+ assertThat(searchResponse.getHits().getAt(0).type(), equalTo("type"));
+ assertThat(searchResponse.getHits().getAt(1).type(), equalTo("type"));
+ assertThat(searchResponse.getHits().getAt(2).type(), equalTo("type"));
+ assertThat(searchResponse.getHits().getAt(3).type(), equalTo("type"));
+ }
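+
+ // Worked example for doc 3 above: its field1 is "b c", so it satisfies the match
+ // on "b" (query 1), the match on "c" (query 2), the bool query requiring both
+ // terms (query 3) and the match_all query (query 4), hence 4 matches.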
+
+ @Test
+ public void testPercolatingExistingDocs_routing() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Adding docs");
+ client().prepareIndex("test", "type", "1").setSource("field1", "b").setRouting("4").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").setRouting("3").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field1", "b c").setRouting("2").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field1", "d").setRouting("1").execute().actionGet();
+
+ logger.info("--> register queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Percolate existing doc with id 1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("1").routing("4"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
+
+ logger.info("--> Percolate existing doc with id 2");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2").routing("3"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ logger.info("--> Percolate existing doc with id 3");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("3").routing("2"))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), arrayWithSize(4));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
+
+ logger.info("--> Percolate existing doc with id 4");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("4").routing("1"))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("4"));
+ }
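+
+ // Hedged note (assumption): the explicit routing values on the get requests above
+ // are required because the docs were indexed with custom routing; a get without
+ // routing would be dispatched to the shard the id hashes to and would likely miss
+ // the doc, causing the percolate request to fail instead of returning matches.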
+
+ @Test
+ public void testPercolatingExistingDocs_versionCheck() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Adding docs");
+ client().prepareIndex("test", "type", "1").setSource("field1", "b").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field1", "b c").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field1", "d").execute().actionGet();
+
+ logger.info("--> registering queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Percolate existing doc with id 2 and version 1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2").version(1l))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ logger.info("--> Percolate existing doc with id 2 and version 2");
+ try {
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2").version(2l))
+ .execute().actionGet();
+ fail("Error should have been thrown");
+ } catch (VersionConflictEngineException e) {
+ // expected: doc 2 is still at version 1
+ }
+
+ logger.info("--> Index doc with id 2 for the second time");
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").execute().actionGet();
+
+ logger.info("--> Percolate existing doc with id 2 and version 2");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2").version(2l))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+ }
+
+ @Test
+ public void testPercolateMultipleIndicesAndAliases() throws Exception {
+ client().admin().indices().prepareCreate("test1")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+ .execute().actionGet();
+ client().admin().indices().prepareCreate("test2")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> registering queries");
+ for (int i = 1; i <= 10; i++) {
+ String index = i % 2 == 0 ? "test1" : "test2";
+ client().prepareIndex(index, PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ }
+
+ logger.info("--> Percolate doc to index test1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test1").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+
+ logger.info("--> Percolate doc to index test2");
+ response = client().preparePercolate()
+ .setIndices("test2").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+
+ logger.info("--> Percolate doc to index test1 and test2");
+ response = client().preparePercolate()
+ .setIndices("test1", "test2").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 10l);
+ assertThat(response.getMatches(), arrayWithSize(10));
+
+ logger.info("--> Percolate doc to index test1 and test3, with ignore missing");
+ response = client().preparePercolate()
+ .setIndices("test1", "test3").setDocumentType("type")
+ .setIndicesOptions(IndicesOptions.lenient())
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+
+ logger.info("--> Adding aliases");
+ IndicesAliasesResponse aliasesResponse = client().admin().indices().prepareAliases()
+ .addAlias("test1", "my-alias1")
+ .addAlias("test2", "my-alias1")
+ .addAlias("test2", "my-alias2")
+ .setTimeout(TimeValue.timeValueHours(10))
+ .execute().actionGet();
+ assertTrue(aliasesResponse.isAcknowledged());
+
+ logger.info("--> Percolate doc to my-alias1");
+ response = client().preparePercolate()
+ .setIndices("my-alias1").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 10l);
+ assertThat(response.getMatches(), arrayWithSize(10));
+ for (PercolateResponse.Match match : response) {
+ assertThat(match.getIndex().string(), anyOf(equalTo("test1"), equalTo("test2")));
+ }
+
+ logger.info("--> Percolate doc to my-alias2");
+ response = client().preparePercolate()
+ .setIndices("my-alias2").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ for (PercolateResponse.Match match : response) {
+ assertThat(match.getIndex().string(), equalTo("test2"));
+ }
+ }
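+
+ // Illustrative sketch (assumption): removing test2 from my-alias1 would drop its
+ // five matches from the alias-based request above, e.g.:
+ //
+ // client().admin().indices().prepareAliases()
+ //         .removeAlias("test2", "my-alias1")
+ //         .execute().actionGet();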
+
+ @Test
+ public void testCountPercolation() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Add dummy doc");
+ client().prepareIndex("test", "type", "1").setSource("field", "value").execute().actionGet();
+
+ logger.info("--> register queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Count percolate doc with field1=b");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate doc with field1=c");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate doc with field1=b c");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate doc with field1=d");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate non existing doc");
+ try {
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("5"))
+ .execute().actionGet();
+ fail("Exception should have been thrown");
+ } catch (DocumentMissingException e) {
+ // expected: no doc with id 5 exists
+ }
+ }
+
+ @Test
+ public void testCountPercolatingExistingDocs() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Adding docs");
+ client().prepareIndex("test", "type", "1").setSource("field1", "b").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field1", "b c").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field1", "d").execute().actionGet();
+
+ logger.info("--> register queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Count percolate existing doc with id 1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate existing doc with id 2");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("2"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate existing doc with id 3");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("3"))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate existing doc with id 4");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("4"))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), nullValue());
+ }
+
+ @Test
+ public void testPercolateSizingWithQueryAndFilter() throws Exception {
+ client().admin().indices().prepareCreate("my-index").execute().actionGet();
+ ensureGreen();
+
+ int numLevels = randomIntBetween(1, 25);
+ long numQueriesPerLevel = randomIntBetween(10, 250);
+ long totalQueries = numLevels * numQueriesPerLevel;
+ logger.info("--> register " + totalQueries + " queries");
+ for (int level = 1; level <= numLevels; level++) {
+ for (int query = 1; query <= numQueriesPerLevel; query++) {
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, level + "-" + query)
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", level).endObject())
+ .execute().actionGet();
+ }
+ }
+
+ boolean onlyCount = randomBoolean();
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .execute().actionGet();
+ assertMatchCount(response, totalQueries);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo((int) totalQueries));
+ }
+
+ int size = randomIntBetween(0, (int) totalQueries - 1);
+ response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setSize(size)
+ .execute().actionGet();
+ assertMatchCount(response, totalQueries);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo(size));
+ }
+
+ // The query / filter capabilities are NOT realtime
+ client().admin().indices().prepareRefresh("my-index").execute().actionGet();
+
+ int runs = randomIntBetween(3, 16);
+ for (int i = 0; i < runs; i++) {
+ onlyCount = randomBoolean();
+ response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(termQuery("level", 1 + randomInt(numLevels - 1)))
+ .execute().actionGet();
+ assertMatchCount(response, numQueriesPerLevel);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo((int) numQueriesPerLevel));
+ }
+ }
+
+ for (int i = 0; i < runs; i++) {
+ onlyCount = randomBoolean();
+ response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateFilter(termFilter("level", 1 + randomInt(numLevels - 1)))
+ .execute().actionGet();
+ assertMatchCount(response, numQueriesPerLevel);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo((int) numQueriesPerLevel));
+ }
+ }
+
+ for (int i = 0; i < runs; i++) {
+ onlyCount = randomBoolean();
+ size = randomIntBetween(0, (int) numQueriesPerLevel - 1);
+ response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setSize(size)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateFilter(termFilter("level", 1 + randomInt(numLevels - 1)))
+ .execute().actionGet();
+ assertMatchCount(response, numQueriesPerLevel);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo(size));
+ }
+ }
+ }
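+
+ // Worked numbers for the size checks above: the match count always reports the
+ // full number of matching queries (numLevels * numQueriesPerLevel in total, or
+ // numQueriesPerLevel once filtered to a single level), while setSize(n) only caps
+ // how many matches are returned, so response.getMatches().length equals n.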
+
+ @Test
+ public void testPercolateScoreAndSorting() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 3)
+ .put("index.number_of_replicas", 1)
+ .build())
+ .execute().actionGet();
+ ensureGreen();
+
+ // Add a dummy doc that should never interfere with percolate operations.
+ client().prepareIndex("my-index", "my-type", "1").setSource("field", "value").execute().actionGet();
+
+ Map<Integer, NavigableSet<Integer>> controlMap = new HashMap<Integer, NavigableSet<Integer>>();
+ long numQueries = randomIntBetween(100, 250);
+ logger.info("--> register " + numQueries + " queries");
+ for (int i = 0; i < numQueries; i++) {
+ int value = randomInt(10);
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", i).field("field1", value).endObject())
+ .execute().actionGet();
+ if (!controlMap.containsKey(value)) {
+ controlMap.put(value, new TreeSet<Integer>());
+ }
+ controlMap.get(value).add(i);
+ }
+ List<Integer> usedValues = new ArrayList<Integer>(controlMap.keySet());
+ refresh();
+
+ // Only retrieve the score
+ int runs = randomInt(27);
+ for (int i = 0; i < runs; i++) {
+ int size = randomIntBetween(1, 50);
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setScore(true)
+ .setSize(size)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
+ assertMatchCount(response, numQueries);
+ assertThat(response.getMatches().length, equalTo(size));
+ for (int j = 0; j < response.getMatches().length; j++) {
+ String id = response.getMatches()[j].getId().string();
+ assertThat(Integer.valueOf(id), equalTo((int) response.getMatches()[j].getScore()));
+ }
+ }
+
+ // Sort the queries by the score
+ for (int i = 0; i < runs; i++) {
+ int size = randomIntBetween(1, 10);
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(size)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
+ assertMatchCount(response, numQueries);
+ assertThat(response.getMatches().length, equalTo(size));
+
+ int expectedId = (int) (numQueries - 1);
+ for (PercolateResponse.Match match : response) {
+ assertThat(match.getId().string(), equalTo(Integer.toString(expectedId)));
+ assertThat(match.getScore(), equalTo((float) expectedId));
+ assertThat(match.getIndex().string(), equalTo("my-index"));
+ expectedId--;
+ }
+ }
+
+ for (int i = 0; i < runs; i++) {
+ int value = usedValues.get(randomInt(usedValues.size() - 1));
+ NavigableSet<Integer> levels = controlMap.get(value);
+ int size = randomIntBetween(1, levels.size());
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(size)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchQuery("field1", value), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
+
+ assertMatchCount(response, levels.size());
+ assertThat(response.getMatches().length, equalTo(Math.min(levels.size(), size)));
+ Iterator<Integer> levelIterator = levels.descendingIterator();
+ for (PercolateResponse.Match match : response) {
+ int controlLevel = levelIterator.next();
+ assertThat(match.getId().string(), equalTo(Integer.toString(controlLevel)));
+ assertThat(match.getScore(), equalTo((float) controlLevel));
+ assertThat(match.getIndex().string(), equalTo("my-index"));
+ }
+ }
+ }
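+
+ // Note on the scoring above (assumption): scriptFunction("doc['level'].value")
+ // makes each matching query score equal to its own "level" field, so an id
+ // (which equals its level) can be compared directly against the score, and
+ // sorting by score returns ids in descending level order.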
+
+ @Test
+ public void testPercolateSortingWithNoSize() throws Exception {
+ client().admin().indices().prepareCreate("my-index").execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 1).endObject())
+ .execute().actionGet();
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 2).endObject())
+ .execute().actionGet();
+ refresh();
+
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(2)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches()[0].getId().string(), equalTo("2"));
+ assertThat(response.getMatches()[0].getScore(), equalTo(2f));
+ assertThat(response.getMatches()[1].getId().string(), equalTo("1"));
+ assertThat(response.getMatches()[1].getScore(), equalTo(1f));
+
+ response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
+ assertThat(response.getCount(), equalTo(0l));
+ assertThat(response.getSuccessfulShards(), equalTo(3));
+ assertThat(response.getShardFailures().length, equalTo(2));
+ assertThat(response.getShardFailures()[0].status().getStatus(), equalTo(400));
+ assertThat(response.getShardFailures()[0].reason(), containsString("Can't sort if size isn't specified"));
+ assertThat(response.getShardFailures()[1].status().getStatus(), equalTo(400));
+ assertThat(response.getShardFailures()[1].reason(), containsString("Can't sort if size isn't specified"));
+ }
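+
+ // Hedged reading of the failure counts above (assumption): with the default of 5
+ // primary shards, the two registered queries land on two different shards; only
+ // shards that actually hold queries trip the "Can't sort if size isn't specified"
+ // validation, so 2 shards fail while the remaining 3 succeed trivially.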
+
+ @Test
+ public void testPercolateSorting_unsupportedField() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", "field", "type=string")
+ .addMapping(PercolatorService.TYPE_NAME, "level", "type=integer", "query", "type=object,enabled=false")
+ .get();
+ ensureGreen();
+
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 1).endObject())
+ .get();
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 2).endObject())
+ .get();
+ refresh();
+
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSize(2)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .addSort(SortBuilders.fieldSort("level"))
+ .get();
+
+ assertThat(response.getShardFailures().length, equalTo(5));
+ assertThat(response.getShardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(response.getShardFailures()[0].reason(), containsString("Only _score desc is supported"));
+ }
+
+ @Test
+ public void testPercolateOnEmptyIndex() throws Exception {
+ client().admin().indices().prepareCreate("my-index").execute().actionGet();
+ ensureGreen();
+
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(2)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
+ assertMatchCount(response, 0l);
+ }
+
+ @Test
+ public void testPercolateNotEmptyIndexButNoRefresh() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 1).endObject())
+ .execute().actionGet();
+
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(2)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction("doc['level'].value")))
+ .execute().actionGet();
+ assertMatchCount(response, 0l);
+ }
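+
+ // Hedged sketch (assumption): with index.refresh_interval set to -1 the
+ // registered query is not yet visible to the percolate-query execution path,
+ // which is why nothing matches; an explicit refresh should make it match, e.g.:
+ //
+ // client().admin().indices().prepareRefresh("my-index").execute().actionGet();
+ // // the same percolate request should then yield 1 match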
+
+ @Test
+ public void testPercolatorWithHighlighting() throws Exception {
+ Client client = client();
+ client.admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .execute().actionGet();
+ ensureGreen();
+
+ if (randomBoolean()) {
+ // FVH HL
+ client.admin().indices().preparePutMapping("test").setType("type")
+ .setSource(
+ jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("store", randomBoolean())
+ .field("term_vector", "with_positions_offsets").endObject()
+ .endObject()
+ .endObject().endObject()
+ ).get();
+ } else if (randomBoolean()) {
+ // plain hl with stored fields
+ client.admin().indices().preparePutMapping("test").setType("type")
+ .setSource(
+ jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("store", true).endObject()
+ .endObject()
+ .endObject().endObject()
+ ).get();
+ } else if (randomBoolean()) {
+ // positions hl
+ client.admin().indices().preparePutMapping("test").setType("type")
+ .setSource(
+ jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string")
+ .field("index_options", "offsets")
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ ).get();
+ }
+
+ logger.info("--> register queries");
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "brown fox")).endObject())
+ .execute().actionGet();
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "lazy dog")).endObject())
+ .execute().actionGet();
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "jumps")).endObject())
+ .execute().actionGet();
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "dog")).endObject())
+ .execute().actionGet();
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, "5")
+ .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "fox")).endObject())
+ .execute().actionGet();
+
+ logger.info("--> Percolate doc with field1=The quick brown fox jumps over the lazy dog");
+ PercolateResponse response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ PercolateResponse.Match[] matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+
+ // Anything with a percolate query isn't realtime
+ client.admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Query percolate doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .setPercolateQuery(matchAllQuery())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+
+ logger.info("--> Query percolate with score for doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .setPercolateQuery(functionScoreQuery(matchAllQuery()).add(new FactorBuilder().boostFactor(5.5f)))
+ .setScore(true)
+ .execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getScore(), equalTo(5.5f));
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getScore(), equalTo(5.5f));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getScore(), equalTo(5.5f));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getScore(), equalTo(5.5f));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getScore(), equalTo(5.5f));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+
+ logger.info("--> Top percolate for doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .setPercolateQuery(functionScoreQuery(matchAllQuery()).add(new FactorBuilder().boostFactor(5.5f)))
+ .setSortByScore(true)
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getScore(), equalTo(5.5f));
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getScore(), equalTo(5.5f));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getScore(), equalTo(5.5f));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getScore(), equalTo(5.5f));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getScore(), equalTo(5.5f));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+
+ logger.info("--> Top percolate for doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1").highlightQuery(QueryBuilders.matchQuery("field1", "jumps")))
+ .setPercolateQuery(functionScoreQuery(matchAllQuery()).add(new FactorBuilder().boostFactor(5.5f)))
+ .setSortByScore(true)
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getScore(), equalTo(5.5f));
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[1].getScore(), equalTo(5.5f));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[2].getScore(), equalTo(5.5f));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getScore(), equalTo(5.5f));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[4].getScore(), equalTo(5.5f));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+
+ // Highlighting an existing doc
+ client.prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject())
+ .get();
+
+ logger.info("--> Top percolate for doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .setPercolateQuery(functionScoreQuery(matchAllQuery()).add(new FactorBuilder().boostFactor(5.5f)))
+ .setSortByScore(true)
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getScore(), equalTo(5.5f));
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getScore(), equalTo(5.5f));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getScore(), equalTo(5.5f));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getScore(), equalTo(5.5f));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getScore(), equalTo(5.5f));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testDeletePercolatorType() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test1"));
+ assertAcked(client().admin().indices().prepareCreate("test2"));
+
+ client().prepareIndex("test1", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test2", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test1", "test2").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ for (Client client : cluster()) {
+ GetMappingsResponse getMappingsResponse = client.admin().indices().prepareGetMappings("test1", "test2").get();
+ boolean hasPercolatorType = getMappingsResponse.getMappings().get("test1").containsKey(PercolatorService.TYPE_NAME);
+ if (!hasPercolatorType) {
+ return false;
+ }
+
+ if (!getMappingsResponse.getMappings().get("test2").containsKey(PercolatorService.TYPE_NAME)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ });
+
+ assertAcked(client().admin().indices().prepareDeleteMapping("test1").setType(PercolatorService.TYPE_NAME));
+ response = client().preparePercolate()
+ .setIndices("test1", "test2").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+
+ assertAcked(client().admin().indices().prepareDeleteMapping("test2").setType(PercolatorService.TYPE_NAME));
+        // The percolate API should return 0 matches, because all docs in the _percolate type have been removed.
+ response = client().preparePercolate()
+ .setIndices("test1", "test2").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 0l);
+ }
+
+ public static String[] convertFromTextArray(PercolateResponse.Match[] matches, String index) {
+ if (matches.length == 0) {
+ return Strings.EMPTY_ARRAY;
+ }
+ String[] strings = new String[matches.length];
+ for (int i = 0; i < matches.length; i++) {
+ assertEquals(index, matches[i].getIndex().string());
+ strings[i] = matches[i].getId().string();
+ }
+ return strings;
+ }
+
+ @Test
+ public void percolateNonMatchingConstantScoreQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test"));
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject()
+ .field("query", QueryBuilders.constantScoreQuery(FilterBuilders.andFilter(
+ FilterBuilders.queryFilter(QueryBuilders.queryString("root")),
+ FilterBuilders.termFilter("message", "tree"))))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("doc")
+ .setSource(jsonBuilder().startObject()
+ .startObject("doc").field("message", "A new bonsai tree ").endObject()
+ .endObject())
+ .execute().actionGet();
+ assertThat(percolate.getFailedShards(), equalTo(0));
+ assertMatchCount(percolate, 0l);
+ }
+
+ @Test
+ public void testPercolationWithDynamicTemplates() throws Exception {
+ assertAcked(prepareCreate("idx").addMapping("type", jsonBuilder().startObject().startObject("type")
+ .field("dynamic", false)
+ .startObject("properties")
+ .startObject("custom")
+ .field("dynamic", true)
+ .field("type", "object")
+ .field("incude_in_all", false)
+ .endObject()
+ .endObject()
+ .startArray("dynamic_template")
+ .startObject()
+ .startObject("custom_fields")
+ .field("path_match", "custom.*")
+ .startObject("mapping")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endArray()
+ .endObject().endObject()));
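+        // With `dynamic` disabled at the type level, only fields under the `custom` object are added dynamically,
+        // and the dynamic template maps any `custom.*` field as not_analyzed.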
+
+ client().prepareIndex("idx", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryString("color:red")).endObject())
+ .get();
+ client().prepareIndex("idx", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryString("color:blue")).endObject())
+ .get();
+
+ PercolateResponse percolateResponse = client().preparePercolate().setDocumentType("type")
+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(jsonBuilder().startObject().startObject("custom").field("color", "blue").endObject().endObject()))
+ .get();
+
+ assertMatchCount(percolateResponse, 0l);
+ assertThat(percolateResponse.getMatches(), arrayWithSize(0));
+
+ // wait until the mapping change has propagated from the percolate request
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute().actionGet();
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ PendingClusterTasksResponse pendingTasks = client().admin().cluster().preparePendingClusterTasks().get();
+ return pendingTasks.pendingTasks().isEmpty();
+ }
+ });
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute().actionGet();
+
+ // The previous percolate request introduced the custom.color field, so now we register the query again
+        // and the field name `color` will be resolved to the `custom.color` field in the mapping via smart field name resolution.
+ client().prepareIndex("idx", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryString("color:blue")).endObject())
+ .get();
+
+        // The second request will yield a match, since the query now resolves to the proper field during parsing.
+ percolateResponse = client().preparePercolate().setDocumentType("type")
+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(jsonBuilder().startObject().startObject("custom").field("color", "blue").endObject().endObject()))
+ .get();
+
+ assertMatchCount(percolateResponse, 1l);
+ assertThat(percolateResponse.getMatches()[0].getId().string(), equalTo("2"));
+ }
+
+ @Test
+ public void testUpdateMappingDynamicallyWhilePercolating() throws Exception {
+ createIndex("test");
+ ensureSearchable();
+
+ // percolation source
+ XContentBuilder percolateDocumentSource = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", 1)
+ .field("field2", "value")
+ .endObject().endObject();
+
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(percolateDocumentSource).execute().actionGet();
+ assertAllSuccessful(response);
+ assertMatchCount(response, 0l);
+ assertThat(response.getMatches(), arrayWithSize(0));
+
+ // wait until the mapping change has propagated
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute().actionGet();
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ PendingClusterTasksResponse pendingTasks = client().admin().cluster().preparePendingClusterTasks().get();
+ return pendingTasks.pendingTasks().isEmpty();
+ }
+ });
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute().actionGet();
+
+ GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings("test").get();
+ assertThat(mappingsResponse.getMappings().get("test"), notNullValue());
+ assertThat(mappingsResponse.getMappings().get("test").get("type1"), notNullValue());
+ assertThat(mappingsResponse.getMappings().get("test").get("type1").getSourceAsMap().isEmpty(), is(false));
+ Map<String, Object> properties = (Map<String, Object>) mappingsResponse.getMappings().get("test").get("type1").getSourceAsMap().get("properties");
+ assertThat(((Map<String, String>) properties.get("field1")).get("type"), equalTo("long"));
+ assertThat(((Map<String, String>) properties.get("field2")).get("type"), equalTo("string"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorTests.java b/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorTests.java
new file mode 100644
index 0000000..de63c3c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorTests.java
@@ -0,0 +1,443 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.percolator;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
+import org.elasticsearch.action.percolate.MultiPercolateResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.percolator.PercolatorTests.convertFromTextArray;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(scope = Scope.TEST, numNodes = 0)
+public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ @Slow
+ public void testRestartNodePercolator1() throws Exception {
+ Settings settings = settingsBuilder()
+ .put(super.indexSettings())
+ .put("gateway.type", "local")
+ .build();
+ cluster().startNode(settings);
+ client().admin().indices().prepareCreate("test").setSettings(
+ settingsBuilder().put("index.number_of_shards", 1).put()
+ ).execute().actionGet();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .execute().actionGet();
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+
+ cluster().rollingRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ }
+
+ @Test
+ @Slow
+ public void testRestartNodePercolator2() throws Exception {
+ Settings settings = settingsBuilder()
+ .put(super.indexSettings())
+ .put("gateway.type", "local")
+ .build();
+ cluster().startNode(settings);
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ assertThat(client().prepareCount().setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+
+ cluster().rollingRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ assertThat(client().prepareCount().setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+
+ DeleteIndexResponse actionGet = client().admin().indices().prepareDelete("test").execute().actionGet();
+ assertThat(actionGet.isAcknowledged(), equalTo(true));
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+ assertThat(client().prepareCount().setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 0l);
+ assertThat(percolate.getMatches(), emptyArray());
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ assertThat(client().prepareCount().setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ }
+
+ @Test
+ @Slow
+ @TestLogging("index.percolator:TRACE,percolator:TRACE")
+ public void testLoadingPercolateQueriesDuringCloseAndOpen() throws Exception {
+ Settings settings = settingsBuilder()
+ .put(super.indexSettings())
+ .put("gateway.type", "local")
+ .build();
+ logger.info("--> Starting 2 nodes");
+ cluster().startNode(settings);
+ cluster().startNode(settings);
+
+ client().admin().indices().prepareDelete("_all").execute().actionGet();
+ ensureGreen();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Add dummy docs");
+ client().prepareIndex("test", "type1", "1").setSource("field1", 0).execute().actionGet();
+ client().prepareIndex("test", "type2", "1").setSource("field1", "0").execute().actionGet();
+
+ logger.info("--> register a queries");
+ for (int i = 1; i <= 100; i++) {
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject()
+ .field("query", rangeQuery("field1").from(0).to(i))
+ // The type must be set now, because two fields with the same name exist in different types.
+                            // Setting the type to `type1` makes sure that the range query gets parsed to a Lucene NumericRangeQuery.
+ .field("type", "type1")
+ .endObject())
+ .execute().actionGet();
+ }
+
+ logger.info("--> Percolate doc with field1=95");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", 95).endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 6l);
+ assertThat(response.getMatches(), arrayWithSize(6));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("95", "96", "97", "98", "99", "100"));
+
+ logger.info("--> Close and open index to trigger percolate queries loading...");
+ assertAcked(client().admin().indices().prepareClose("test"));
+ assertAcked(client().admin().indices().prepareOpen("test"));
+ ensureGreen();
+
+ logger.info("--> Percolate doc with field1=100");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", 100).endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(response.getMatches()[0].getId().string(), equalTo("100"));
+ }
+
+ @Test
+ @Slow
+ public void testSinglePercolator_recovery() throws Exception {
+ percolatorRecovery(false);
+ }
+
+ @Test
+ @Slow
+ public void testMultiPercolator_recovery() throws Exception {
+ percolatorRecovery(true);
+ }
+
+    // 3 nodes, 2 primaries + 2 replicas per primary, so each node should have a copy of the data.
+ // We only start and stop nodes 2 and 3, so all requests should succeed and never be partial.
+ private void percolatorRecovery(final boolean multiPercolate) throws Exception {
+ logger.info("--> ensuring exactly 2 nodes");
+ cluster().ensureAtLeastNumNodes(2);
+ cluster().ensureAtMostNumNodes(2);
+ logger.info("--> Adding 3th node");
+ cluster().startNode(settingsBuilder().put("node.stay", true));
+
+ client().admin().indices().prepareDelete("_all").execute().actionGet();
+ ensureGreen();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 2)
+ )
+ .execute().actionGet();
+ ensureGreen();
+
+ final Client client = cluster().client(new Predicate<Settings>() {
+ @Override
+ public boolean apply(Settings input) {
+ return input.getAsBoolean("node.stay", false);
+ }
+ });
+ final int numQueries = randomIntBetween(50, 100);
+ logger.info("--> register a queries");
+ for (int i = 0; i < numQueries; i++) {
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ }
+
+ client.prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("field", "a"))
+ .execute().actionGet();
+
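+        // Background thread: keeps issuing (multi) percolate requests while the other nodes are stopped and restarted below,
+        // recording the first failure it sees.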
+ final AtomicBoolean run = new AtomicBoolean(true);
+ final CountDownLatch done = new CountDownLatch(1);
+ final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ XContentBuilder doc = jsonBuilder().startObject().field("field", "a").endObject();
+ while (run.get()) {
+ NodesInfoResponse nodesInfoResponse = client.admin().cluster().prepareNodesInfo()
+ .execute().actionGet();
+ String node2Id = null;
+ String node3Id = null;
+ for (NodeInfo nodeInfo : nodesInfoResponse) {
+ if ("node2".equals(nodeInfo.getNode().getName())) {
+ node2Id = nodeInfo.getNode().id();
+ } else if ("node3".equals(nodeInfo.getNode().getName())) {
+ node3Id = nodeInfo.getNode().id();
+ }
+ }
+
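+                    // Prefer whichever restartable node is currently up so requests also hit shards that may be recovering;
+                    // fall back to _local when both are down.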
+ String preference;
+ if (node2Id == null && node3Id == null) {
+ preference = "_local";
+ } else if (node2Id == null || node3Id == null) {
+ if (node2Id != null) {
+ preference = "_prefer_node:" + node2Id;
+ } else {
+ preference = "_prefer_node:" + node3Id;
+ }
+ } else {
+ preference = "_prefer_node:" + (randomBoolean() ? node2Id : node3Id);
+ }
+
+ if (multiPercolate) {
+ MultiPercolateRequestBuilder builder = client
+ .prepareMultiPercolate();
+ int numPercolateRequest = randomIntBetween(50, 100);
+
+ for (int i = 0; i < numPercolateRequest; i++) {
+ if (randomBoolean()) {
+ builder.add(
+ client.preparePercolate()
+ .setPreference(preference)
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .setIndices("test").setDocumentType("type")
+ );
+ } else {
+ builder.add(
+ client.preparePercolate()
+ .setPreference(preference)
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(doc)));
+ }
+ }
+
+ MultiPercolateResponse response = builder.execute().actionGet();
+ assertThat(response.items().length, equalTo(numPercolateRequest));
+ for (MultiPercolateResponse.Item item : response) {
+ assertThat(item.isFailure(), equalTo(false));
+ assertNoFailures(item.getResponse());
+ assertThat(item.getResponse().getSuccessfulShards(), equalTo(item.getResponse().getTotalShards()));
+ assertThat(item.getResponse().getCount(), equalTo((long) numQueries));
+ assertThat(item.getResponse().getMatches().length, equalTo(numQueries));
+ }
+ } else {
+ PercolateResponse response;
+ if (randomBoolean()) {
+ response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(doc))
+ .setPreference(preference)
+ .execute().actionGet();
+ } else {
+ response = client.preparePercolate()
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .setIndices("test").setDocumentType("type")
+ .setPreference(preference)
+ .execute().actionGet();
+ }
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getCount(), equalTo((long) numQueries));
+ assertThat(response.getMatches().length, equalTo(numQueries));
+ }
+ }
+ } catch (Throwable t) {
+ logger.info("Error in percolate thread...", t);
+ run.set(false);
+ error.set(t);
+ } finally {
+ done.countDown();
+ }
+ }
+ };
+ new Thread(r).start();
+
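+        // Only nodes without the `node.stay` marker may be stopped; the tagged node always keeps a full copy of the data.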
+ Predicate<Settings> nodePredicate = new Predicate<Settings>() {
+ @Override
+ public boolean apply(Settings input) {
+ return !input.getAsBoolean("node.stay", false);
+ }
+ };
+ try {
+ // 1 index, 2 primaries, 2 replicas per primary
+ for (int i = 0; i < 4; i++) {
+ cluster().stopRandomNode(nodePredicate);
+ client.admin().cluster().prepareHealth("test")
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(TimeValue.timeValueMinutes(2))
+ .setWaitForYellowStatus()
+ .setWaitForActiveShards(4) // 2 nodes, so 4 shards (2 primaries, 2 replicas)
+ .execute().actionGet();
+ assertThat(error.get(), nullValue());
+ cluster().stopRandomNode(nodePredicate);
+ client.admin().cluster().prepareHealth("test")
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(TimeValue.timeValueMinutes(2))
+ .setWaitForYellowStatus()
+ .setWaitForActiveShards(2) // 1 node, so 2 shards (2 primaries, 0 replicas)
+ .execute().actionGet();
+ assertThat(error.get(), nullValue());
+ cluster().startNode();
+ client.admin().cluster().prepareHealth("test")
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(TimeValue.timeValueMinutes(2))
+ .setWaitForYellowStatus()
+ .setWaitForActiveShards(4) // 2 nodes, so 4 shards (2 primaries, 2 replicas)
+ .execute().actionGet();
+ assertThat(error.get(), nullValue());
+ cluster().startNode();
+ client.admin().cluster().prepareHealth("test")
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(TimeValue.timeValueMinutes(2))
+                        .setWaitForGreenStatus() // All nodes are back, so every replica can be assigned; wait for green instead of yellow
+ .setWaitForActiveShards(6) // 3 nodes, so 6 shards (2 primaries, 4 replicas)
+ .execute().actionGet();
+ assertThat(error.get(), nullValue());
+ }
+ } finally {
+ run.set(false);
+ }
+ done.await();
+ assertThat(error.get(), nullValue());
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java b/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java
new file mode 100644
index 0000000..caa9846
--- /dev/null
+++ b/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.percolator;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.AlreadyExpiredException;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.percolator.PercolatorTests.convertFromTextArray;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+@ClusterScope(scope = Scope.TEST)
+public class TTLPercolatorTests extends ElasticsearchIntegrationTest {
+
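+    // Interval at which the TTL purger runs, in milliseconds.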
+ private static final long PURGE_INTERVAL = 200;
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("indices.ttl.interval", PURGE_INTERVAL)
+ .build();
+ }
+
+ @Test
+ public void testPercolatingWithTimeToLive() throws Exception {
+ final Client client = client();
+ ensureGreen();
+
+        String percolatorMapping = XContentFactory.jsonBuilder().startObject().startObject(PercolatorService.TYPE_NAME)
+ .startObject("_ttl").field("enabled", true).endObject()
+ .startObject("_timestamp").field("enabled", true).endObject()
+ .endObject().endObject().string();
+
+ String typeMapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_ttl").field("enabled", true).endObject()
+ .startObject("_timestamp").field("enabled", true).endObject()
+ .endObject().endObject().string();
+
+ client.admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+                .addMapping(PercolatorService.TYPE_NAME, percolatorMapping)
+ .addMapping("type1", typeMapping)
+ .execute().actionGet();
+ ensureGreen();
+
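+        // Register a percolator query with a 1.5s TTL; the purger should delete it from both the primary and the replica shortly after expiry.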
+ long ttl = 1500;
+ long now = System.currentTimeMillis();
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, "kuku").setSource(jsonBuilder()
+ .startObject()
+ .startObject("query")
+ .startObject("term")
+ .field("field1", "value1")
+ .endObject()
+ .endObject()
+ .endObject()
+ ).setRefresh(true).setTTL(ttl).execute().actionGet();
+
+ IndicesStatsResponse response = client.admin().indices().prepareStats("test")
+ .clear().setIndexing(true)
+ .execute().actionGet();
+ assertThat(response.getIndices().get("test").getTotal().getIndexing().getTotal().getIndexCount(), equalTo(2l));
+
+ PercolateResponse percolateResponse = client.preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder()
+ .startObject()
+ .startObject("doc")
+ .field("field1", "value1")
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+ assertNoFailures(percolateResponse);
+ if (percolateResponse.getMatches().length == 0) {
+            // OK, ttl + purgeInterval has passed (slow machine or many other tests were running at the same time)
+ GetResponse getResponse = client.prepareGet("test", PercolatorService.TYPE_NAME, "kuku").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ response = client.admin().indices().prepareStats("test")
+ .clear().setIndexing(true)
+ .execute().actionGet();
+ long currentDeleteCount = response.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount();
+ assertThat(currentDeleteCount, equalTo(2l));
+ return;
+ }
+
+ assertThat(convertFromTextArray(percolateResponse.getMatches(), "test"), arrayContaining("kuku"));
+ long timeSpent = System.currentTimeMillis() - now;
+ long waitTime = ttl + PURGE_INTERVAL - timeSpent;
+ if (waitTime >= 0) {
+ Thread.sleep(waitTime); // Doesn't make sense to check the deleteCount before ttl has expired
+ }
+
+ // See comment in SimpleTTLTests
+ logger.info("Checking if the ttl purger has run");
+ long currentDeleteCount;
+ do {
+ response = client.admin().indices().prepareStats("test")
+ .clear().setIndexing(true)
+ .execute().actionGet();
+            // This returns the number of delete operations from the indexing stats (not the Lucene delete count)
+ currentDeleteCount = response.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount();
+ }
+        while (currentDeleteCount < 2); // TTL deletes one doc, but it is indexed on both the primary shard and the replica shard.
+ assertThat(currentDeleteCount, equalTo(2l));
+
+ percolateResponse = client.preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder()
+ .startObject()
+ .startObject("doc")
+ .field("field1", "value1")
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+ assertMatchCount(percolateResponse, 0l);
+ assertThat(percolateResponse.getMatches(), emptyArray());
+ }
+
+
+ @Test
+ public void testEnsureTTLDoesNotCreateIndex() throws IOException, InterruptedException {
+ ensureGreen();
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder()
+ .put("indices.ttl.interval", 60) // 60 sec
+ .build()).get();
+
+ String typeMapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_ttl").field("enabled", true).endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1))
+ .addMapping("type1", typeMapping)
+ .execute().actionGet();
+ ensureGreen();
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder()
+ .put("indices.ttl.interval", 1) // 60 sec
+ .build()).get();
+
+ for (int i = 0; i < 100; i++) {
+ logger.debug("index doc {} ", i);
+ try {
+ client().prepareIndex("test", "type1", "" + i).setSource(jsonBuilder()
+ .startObject()
+ .startObject("query")
+ .startObject("term")
+ .field("field1", "value1")
+ .endObject()
+ .endObject()
+ .endObject()
+ ).setTTL(randomIntBetween(1, 500)).execute().actionGet();
+ } catch (MapperParsingException e) {
+ logger.info("failed indexing {}", i, e);
+                // If we are unlucky, the TTL is so small that the expiry date is already in the past when
+                // we parse the doc; ignore those.
+ assertThat(e.getCause(), Matchers.instanceOf(AlreadyExpiredException.class));
+ }
+
+ }
+ refresh();
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("test").clear().setIndexing(true).get();
+ logger.debug("delete count [{}]", indicesStatsResponse.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount());
+                // TTL deletes each doc on the primary shard and on its replica shards
+ return indicesStatsResponse.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount() != 0;
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+ cluster().wipeIndices("test");
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", typeMapping)
+ .execute().actionGet();
+
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/plugin/PluginManagerTests.java b/src/test/java/org/elasticsearch/plugin/PluginManagerTests.java
new file mode 100644
index 0000000..1fbb0c6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/plugin/PluginManagerTests.java
@@ -0,0 +1,318 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.plugins.PluginManager;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.helper.HttpClient;
+import org.elasticsearch.rest.helper.HttpClientResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.junit.annotations.Network;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URL;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+@ClusterScope(scope = Scope.TEST, numNodes = 0, transportClientRatio = 0.0)
+public class PluginManagerTests extends ElasticsearchIntegrationTest {
+ private static final Settings SETTINGS = ImmutableSettings.settingsBuilder()
+ .put("discovery.zen.ping.multicast.enabled", false)
+ .put("force.http.enabled", true)
+ .build();
+ private static final String PLUGIN_DIR = "plugins";
+
+ @After
+ public void afterTest() {
+ deletePluginsFolder();
+ }
+
+ @Before
+ public void beforeTest() {
+ deletePluginsFolder();
+ }
+
+ @Test
+ public void testLocalPluginInstallSingleFolder() throws Exception {
+        //When the archive has only a single folder at the top level (and no files), we strip that folder while extracting
+ String pluginName = "plugin-test";
+ URL url = PluginManagerTests.class.getResource("plugin_single_folder.zip");
+ downloadAndExtract(pluginName, "file://" + url.getFile());
+
+ cluster().startNode(SETTINGS);
+
+ assertPluginLoaded(pluginName);
+ assertPluginAvailable(pluginName);
+ }
+
+ @Test
+ public void testLocalPluginInstallSiteFolder() throws Exception {
+        //When the archive has only a single folder at the top level but it is called _site, we make it work:
+        //we can either strip the folder while extracting and re-add it manually, or just leave it as it is
+ String pluginName = "plugin-test";
+ URL url = PluginManagerTests.class.getResource("plugin_folder_site.zip");
+ downloadAndExtract(pluginName, "file://" + url.getFile());
+
+        cluster().startNode(SETTINGS);
+
+ assertPluginLoaded(pluginName);
+ assertPluginAvailable(pluginName);
+ }
+
+ @Test
+ public void testLocalPluginWithoutFolders() throws Exception {
+        //When the top level contains only files and no folders at all, we don't modify anything
+ String pluginName = "plugin-test";
+ URL url = PluginManagerTests.class.getResource("plugin_without_folders.zip");
+ downloadAndExtract(pluginName, "file://" + url.getFile());
+
+ cluster().startNode(SETTINGS);
+
+ assertPluginLoaded(pluginName);
+ assertPluginAvailable(pluginName);
+ }
+
+ @Test
+ public void testLocalPluginFolderAndFile() throws Exception {
+        //When we have a single top-level folder alongside top-level files, we don't modify anything
+ String pluginName = "plugin-test";
+ URL url = PluginManagerTests.class.getResource("plugin_folder_file.zip");
+ downloadAndExtract(pluginName, "file://" + url.getFile());
+
+ cluster().startNode(SETTINGS);
+
+ assertPluginLoaded(pluginName);
+ assertPluginAvailable(pluginName);
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testSitePluginWithSourceThrows() throws Exception {
+ String pluginName = "plugin-with-source";
+ URL url = PluginManagerTests.class.getResource("plugin_with_sourcefiles.zip");
+ downloadAndExtract(pluginName, "file://" + url.getFile());
+ }
+
+ /**
+     * Builds a plugin manager instance which waits at most 30 seconds before
+     * raising an ElasticsearchTimeoutException
+ */
+ private static PluginManager pluginManager(String pluginUrl) {
+ Tuple<Settings, Environment> initialSettings = InternalSettingsPreparer.prepareSettings(
+ ImmutableSettings.settingsBuilder().build(), false);
+ if (!initialSettings.v2().pluginsFile().exists()) {
+ FileSystemUtils.mkdirs(initialSettings.v2().pluginsFile());
+ }
+ return new PluginManager(initialSettings.v2(), pluginUrl, PluginManager.OutputMode.SILENT, TimeValue.timeValueSeconds(30));
+ }
+
+ private static void downloadAndExtract(String pluginName, String pluginUrl) throws IOException {
+ pluginManager(pluginUrl).downloadAndExtract(pluginName);
+ }
+
+ private void assertPluginLoaded(String pluginName) {
+ NodesInfoResponse nodesInfoResponse = client().admin().cluster().prepareNodesInfo().clear().setPlugin(true).get();
+ assertThat(nodesInfoResponse.getNodes().length, equalTo(1));
+ assertThat(nodesInfoResponse.getNodes()[0].getPlugins().getInfos(), notNullValue());
+ assertThat(nodesInfoResponse.getNodes()[0].getPlugins().getInfos().size(), equalTo(1));
+ assertThat(nodesInfoResponse.getNodes()[0].getPlugins().getInfos().get(0).getName(), equalTo(pluginName));
+ assertThat(nodesInfoResponse.getNodes()[0].getPlugins().getInfos().get(0).isSite(), equalTo(true));
+ }
+
+ private void assertPluginAvailable(String pluginName) throws InterruptedException {
+ HttpServerTransport httpServerTransport = cluster().getInstance(HttpServerTransport.class);
+ final HttpClient httpClient = new HttpClient(httpServerTransport.boundAddress().publishAddress());
+ logger.info("--> tested http address [{}]", httpServerTransport.info().getAddress());
+
+ //checking that the http connector is working properly
+        // We retry for a few seconds, as the REST interface might not be fully started yet
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ HttpClientResponse response = httpClient.request("");
+ if (response.errorCode() != RestStatus.OK.getStatus()) {
+ // We want to trace what's going on here before failing the test
+ logger.info("--> error caught [{}], headers [{}]", response.errorCode(), response.getHeaders());
+ logger.info("--> cluster state [{}]", cluster().clusterService().state());
+ return false;
+ }
+ return true;
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+
+
+ //checking now that the plugin is available
+ HttpClientResponse response = httpClient.request("_plugin/" + pluginName + "/");
+ assertThat(response, notNullValue());
+ assertThat(response.errorCode(), equalTo(RestStatus.OK.getStatus()));
+ }
+
+ @Test
+ public void testListInstalledEmpty() throws IOException {
+ File[] plugins = pluginManager(null).getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(0));
+ }
+
+ @Test(expected = IOException.class)
+ public void testInstallPluginNull() throws IOException {
+ pluginManager(null).downloadAndExtract("");
+ }
+
+
+ @Test
+ public void testInstallPlugin() throws IOException {
+ PluginManager pluginManager = pluginManager("file://".concat(PluginManagerTests.class.getResource("plugin_with_classfile.zip").getFile()));
+
+ pluginManager.downloadAndExtract("plugin");
+ File[] plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(1));
+ }
+
+ @Test
+ public void testInstallSitePlugin() throws IOException {
+ PluginManager pluginManager = pluginManager("file://".concat(PluginManagerTests.class.getResource("plugin_without_folders.zip").getFile()));
+
+ pluginManager.downloadAndExtract("plugin-site");
+ File[] plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(1));
+
+ // We want to check that Plugin Manager moves content to _site
+ String pluginDir = PLUGIN_DIR.concat("/plugin-site/_site");
+ assertThat(FileSystemUtils.exists(new File(pluginDir)), is(true));
+ }
+
+
+ private void singlePluginInstallAndRemove(String pluginShortName, String pluginCoordinates) throws IOException {
+ logger.info("--> trying to download and install [{}]", pluginShortName);
+ PluginManager pluginManager = pluginManager(pluginCoordinates);
+ try {
+ pluginManager.downloadAndExtract(pluginShortName);
+ File[] plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(1));
+
+ // We remove it
+ pluginManager.removePlugin(pluginShortName);
+ plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(0));
+ } catch (IOException e) {
+ logger.warn("--> IOException raised while downloading plugin [{}]. Skipping test.", e, pluginShortName);
+ } catch (ElasticsearchTimeoutException e) {
+ logger.warn("--> timeout exception raised while downloading plugin [{}]. Skipping test.", pluginShortName);
+ }
+ }
+
+ /**
+     * These tests are ignored by default because they require internet access.
+     * To activate them, use -Dtests.network=true
+     * We test the regular form: username/reponame/version
+     * It should be found in the download.elasticsearch.org service
+ */
+ @Test
+ @Network
+ public void testInstallPluginWithElasticsearchDownloadService() throws IOException {
+ assumeTrue(isDownloadServiceWorking("http://download.elasticsearch.org/", "elasticsearch/ci-test.txt"));
+ singlePluginInstallAndRemove("elasticsearch/elasticsearch-transport-thrift/1.5.0", null);
+ }
+
+ /**
+     * These tests are ignored by default because they require internet access.
+     * To activate them, use -Dtests.network=true
+     * We test the regular form: groupId/artifactId/version
+     * It should be found in the Maven Central service
+ */
+ @Test
+ @Network
+ public void testInstallPluginWithMavenCentral() throws IOException {
+ assumeTrue(isDownloadServiceWorking("http://search.maven.org/", "/"));
+ singlePluginInstallAndRemove("org.elasticsearch/elasticsearch-transport-thrift/1.5.0", null);
+ }
+
+ /**
+     * These tests are ignored by default because they require internet access.
+     * To activate them, use -Dtests.network=true
+     * We test site plugins from GitHub: userName/repoName
+     * It should be found on GitHub
+ */
+ @Test
+ @Network
+ public void testInstallPluginWithGithub() throws IOException {
+ assumeTrue(isDownloadServiceWorking("https://github.com/", "/"));
+ singlePluginInstallAndRemove("elasticsearch/kibana", null);
+ }
+
+ private boolean isDownloadServiceWorking(String url, String resource) {
+ HttpClient client = new HttpClient(url);
+ try {
+ if (client.request(resource).errorCode() != 200) {
+ logger.warn("[{}{}] download service is not working. Disabling current test.", url, resource);
+ return false;
+ }
+ return true;
+ } catch (Throwable t) {
+ logger.warn("[{}{}] download service is not working. Disabling current test.", url, resource);
+ }
+ return false;
+ }
+
+ private void deletePluginsFolder() {
+ FileSystemUtils.deleteRecursively(new File(PLUGIN_DIR));
+ }
+
+ @Test
+ public void testRemovePlugin() throws Exception {
+ // We want to remove plugin with plugin short name
+ singlePluginInstallAndRemove("plugintest", "file://".concat(PluginManagerTests.class.getResource("plugin_without_folders.zip").getFile()));
+
+ // We want to remove plugin with groupid/artifactid/version form
+ singlePluginInstallAndRemove("groupid/plugintest/1.0.0", "file://".concat(PluginManagerTests.class.getResource("plugin_without_folders.zip").getFile()));
+
+ // We want to remove plugin with groupid/artifactid form
+ singlePluginInstallAndRemove("groupid/plugintest", "file://".concat(PluginManagerTests.class.getResource("plugin_without_folders.zip").getFile()));
+ }
+
+ @Test(expected = ElasticsearchIllegalArgumentException.class)
+ public void testRemovePluginWithURLForm() throws Exception {
+ PluginManager pluginManager = pluginManager(null);
+ pluginManager.removePlugin("file://whatever");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/plugin/ResponseHeaderPluginTests.java b/src/test/java/org/elasticsearch/plugin/ResponseHeaderPluginTests.java
new file mode 100644
index 0000000..a2d20da
--- /dev/null
+++ b/src/test/java/org/elasticsearch/plugin/ResponseHeaderPluginTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.plugin.responseheader.TestResponseHeaderPlugin;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.helper.HttpClient;
+import org.elasticsearch.rest.helper.HttpClientResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Test a rest action that sets special response headers
+ */
+@ClusterScope(scope = Scope.SUITE, numNodes = 1)
+public class ResponseHeaderPluginTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
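+        // Load the test plugin via `plugin.types` and force-enable HTTP so its REST endpoint can be exercised.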
+ return ImmutableSettings.settingsBuilder()
+ .put("plugin.types", TestResponseHeaderPlugin.class.getName())
+ .put("force.http.enabled", true)
+ .put(super.nodeSettings(nodeOrdinal))
+ .build();
+ }
+
+ @Test
+ public void testThatSettingHeadersWorks() throws Exception {
+ ensureGreen();
+ HttpClientResponse response = httpClient().request("/_protected");
+ assertThat(response.errorCode(), equalTo(RestStatus.UNAUTHORIZED.getStatus()));
+ assertThat(response.getHeader("Secret"), equalTo("required"));
+
+ Map<String, String> headers = Maps.newHashMap();
+ headers.put("Secret", "password");
+ HttpClientResponse authResponse = httpClient().request("GET", "_protected", headers);
+ assertThat(authResponse.errorCode(), equalTo(RestStatus.OK.getStatus()));
+ assertThat(authResponse.getHeader("Secret"), equalTo("granted"));
+ }
+
+ private HttpClient httpClient() {
+ HttpServerTransport httpServerTransport = cluster().getInstance(HttpServerTransport.class);
+ return new HttpClient(httpServerTransport.boundAddress().publishAddress());
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/plugin/SitePluginTests.java b/src/test/java/org/elasticsearch/plugin/SitePluginTests.java
new file mode 100644
index 0000000..465a355
--- /dev/null
+++ b/src/test/java/org/elasticsearch/plugin/SitePluginTests.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.helper.HttpClient;
+import org.elasticsearch.rest.helper.HttpClientResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.io.File;
+import java.net.URISyntaxException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * We want to test site plugins
+ */
+@ClusterScope(scope = Scope.SUITE, numNodes = 1)
+public class SitePluginTests extends ElasticsearchIntegrationTest {
+
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ try {
+ File pluginDir = new File(SitePluginTests.class.getResource("/org/elasticsearch/plugin").toURI());
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("path.plugins", pluginDir.getAbsolutePath())
+ .put("force.http.enabled", true)
+ .build();
+ } catch (URISyntaxException ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+
+ public HttpClient httpClient(String id) {
+ HttpServerTransport httpServerTransport = cluster().getInstance(HttpServerTransport.class);
+ return new HttpClient(httpServerTransport.boundAddress().publishAddress());
+ }
+
+ @Test
+ public void testRedirectSitePlugin() throws Exception {
+ // We use an HTTP Client to test redirection
+ HttpClientResponse response = httpClient("test").request("/_plugin/dummy");
+ assertThat(response.errorCode(), equalTo(RestStatus.MOVED_PERMANENTLY.getStatus()));
+ assertThat(response.response(), containsString("/_plugin/dummy/"));
+
+ // We test the real URL
+ response = httpClient("test").request("/_plugin/dummy/");
+ assertThat(response.errorCode(), equalTo(RestStatus.OK.getStatus()));
+ assertThat(response.response(), containsString("<title>Dummy Site Plugin</title>"));
+ }
+
+ /**
+ * Test direct access to an existing file (index.html)
+ */
+ @Test
+ public void testAnyPage() throws Exception {
+ HttpClientResponse response = httpClient("test").request("/_plugin/dummy/index.html");
+ assertThat(response.errorCode(), equalTo(RestStatus.OK.getStatus()));
+ assertThat(response.response(), containsString("<title>Dummy Site Plugin</title>"));
+ }
+
+ /**
+ * Test case for #4845: https://github.com/elasticsearch/elasticsearch/issues/4845
+ * Serving _site plugins do not pick up on index.html for sub directories
+ */
+ @Test
+ public void testWelcomePageInSubDirs() throws Exception {
+ HttpClientResponse response = httpClient("test").request("/_plugin/subdir/dir/");
+ assertThat(response.errorCode(), equalTo(RestStatus.OK.getStatus()));
+ assertThat(response.response(), containsString("<title>Dummy Site Plugin (subdir)</title>"));
+
+ response = httpClient("test").request("/_plugin/subdir/dir_without_index/");
+ assertThat(response.errorCode(), equalTo(RestStatus.FORBIDDEN.getStatus()));
+
+ response = httpClient("test").request("/_plugin/subdir/dir_without_index/page.html");
+ assertThat(response.errorCode(), equalTo(RestStatus.OK.getStatus()));
+ assertThat(response.response(), containsString("<title>Dummy Site Plugin (page)</title>"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderPlugin.java b/src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderPlugin.java
new file mode 100644
index 0000000..86304e2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderPlugin.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugin.responseheader;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.rest.RestModule;
+
+public class TestResponseHeaderPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test-plugin-custom-header";
+ }
+
+ @Override
+ public String description() {
+ return "test-plugin-custom-header-desc";
+ }
+
+ public void onModule(RestModule restModule) {
+ restModule.addRestAction(TestResponseHeaderRestAction.class);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderRestAction.java b/src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderRestAction.java
new file mode 100644
index 0000000..4d1c9eb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/plugin/responseheader/TestResponseHeaderRestAction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin.responseheader;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+public class TestResponseHeaderRestAction extends BaseRestHandler {
+
+ @Inject
+ public TestResponseHeaderRestAction(Settings settings, Client client, RestController controller) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.GET, "/_protected", this);
+ }
+
+ @Override
+ public void handleRequest(RestRequest request, RestChannel channel) {
+ if ("password".equals(request.header("Secret"))) {
+ RestResponse response = new StringRestResponse(RestStatus.OK, "Access granted");
+ response.addHeader("Secret", "granted");
+ channel.sendResponse(response);
+ } else {
+ RestResponse response = new StringRestResponse(RestStatus.UNAUTHORIZED, "Access denied");
+ response.addHeader("Secret", "required");
+ channel.sendResponse(response);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/FullRollingRestartTests.java b/src/test/java/org/elasticsearch/recovery/FullRollingRestartTests.java
new file mode 100644
index 0000000..61b235d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/FullRollingRestartTests.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+
+/**
+ * Tests a full rolling restart: the cluster grows to five nodes and is then shut down
+ * node by node, verifying after each step that all documents remain available.
+ */
+@ClusterScope(scope = Scope.TEST, numNodes = 0, transportClientRatio = 0.0)
+public class FullRollingRestartTests extends ElasticsearchIntegrationTest {
+
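+    /** Asserts that the given cluster health request did not time out, failing the test (with logging) if it did. */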
+ protected void assertTimeout(ClusterHealthRequestBuilder requestBuilder) {
+ ClusterHealthResponse clusterHealth = requestBuilder.get();
+ if (clusterHealth.isTimedOut()) {
+ logger.info("cluster health request timed out:\n{}", clusterHealth);
+ fail("cluster health request timed out");
+ }
+ }
+
+ @Test
+ @Slow
+ @TestLogging("indices.cluster:TRACE,cluster.service:TRACE")
+ public void testFullRollingRestart() throws Exception {
+ cluster().startNode();
+ createIndex("test");
+
+ for (int i = 0; i < 1000; i++) {
+ client().prepareIndex("test", "type1", Long.toString(i))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
+ }
+ client().admin().indices().prepareFlush().execute().actionGet();
+ for (int i = 1000; i < 2000; i++) {
+ client().prepareIndex("test", "type1", Long.toString(i))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
+ }
+
+ // now start adding nodes
+ cluster().startNode();
+ cluster().startNode();
+
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("3"));
+
+ // now start adding nodes
+ cluster().startNode();
+ cluster().startNode();
+
+        // We now have 5 nodes, so raise minimum_master_nodes to a quorum of 3
+ setMinimumMasterNodes(3);
+
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("5"));
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2000L);
+ }
+
+ // now start shutting nodes down
+ cluster().stopRandomNode();
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("4"));
+
+        // going down to 3 nodes. note that the new minimum_master_nodes value may not yet be in effect when we
+        // shut down the 4th node, but that's OK because it was set to 3 beforehand.
+ setMinimumMasterNodes(2);
+ cluster().stopRandomNode();
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("3"));
+
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2000L);
+ }
+
+ // closing the 3rd node
+ cluster().stopRandomNode();
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("2"));
+
+ // closing the 2nd node
+ setMinimumMasterNodes(1);
+ cluster().stopRandomNode();
+
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForYellowStatus().setWaitForRelocatingShards(0).setWaitForNodes("1"));
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2000L);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadTests.java b/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadTests.java
new file mode 100644
index 0000000..68e519b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadTests.java
@@ -0,0 +1,460 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.shard.DocsStats;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.emptyIterable;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests that shard recovery and relocation complete correctly while the index is under
+ * continuous indexing load from multiple writer threads.
+ */
+public class RecoveryWhileUnderLoadTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(RecoveryWhileUnderLoadTests.class);
+
+ @Test @TestLogging("action.search.type:TRACE,action.admin.indices.refresh:TRACE")
+ @Slow
+ public void recoverWhileUnderLoadAllocateBackupsTest() throws Exception {
+ logger.info("--> creating test index ...");
+ assertAcked(prepareCreate("test", 1));
+
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] writers = new Thread[5];
+ final CountDownLatch stopLatch = new CountDownLatch(writers.length);
+
+ logger.info("--> starting {} indexing threads", writers.length);
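+        // each writer thread indexes documents with ids from the shared generator until `stop` is set;
+        // the latch lets the test wait for all writers to finish before verifying counts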
+ for (int i = 0; i < writers.length; i++) {
+ final int indexerId = i;
+ final Client client = client();
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ long id = idGenerator.incrementAndGet();
+ if (id % 1000 == 0) {
+ client.admin().indices().prepareFlush().execute().actionGet();
+ }
+ client.prepareIndex("test", "type1", Long.toString(id))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + id).map()).execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+ logger.info("**** done indexing thread {}", indexerId);
+ } catch (Throwable e) {
+ logger.warn("**** failed indexing thread {}", e, indexerId);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ logger.info("--> waiting for 2000 docs to be indexed ...");
+ waitForDocs(2000);
+ logger.info("--> 2000 docs indexed");
+
+        logger.info("--> flushing the index ...");
+ // now flush, just to make sure we have some data in the index, not just translog
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+
+ logger.info("--> waiting for 4000 docs to be indexed ...");
+ waitForDocs(4000);
+ logger.info("--> 4000 docs indexed");
+
+ logger.info("--> allow 2 nodes for index [test] ...");
+ // now start another node, while we index
+ allowNodes("test", 2);
+
+ logger.info("--> waiting for GREEN health status ...");
+ // make sure the cluster state is green, and all has been recovered
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForNodes(">=2").execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("--> waiting for 15000 docs to be indexed ...");
+ waitForDocs(15000);
+ logger.info("--> 15000 docs indexed");
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ stop.set(true);
+ stopLatch.await();
+ logger.info("--> indexing threads stopped");
+
+ logger.info("--> refreshing the index");
+ refreshAndAssert();
+ logger.info("--> verifying indexed content");
+ iterateAssertCount(5, indexCounter.get(), 10);
+ }
+
+ @Test @TestLogging("action.search.type:TRACE,action.admin.indices.refresh:TRACE")
+ @Slow
+ public void recoverWhileUnderLoadAllocateBackupsRelocatePrimariesTest() throws Exception {
+ logger.info("--> creating test index ...");
+ assertAcked(prepareCreate("test", 1));
+
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] writers = new Thread[5];
+ logger.info("--> starting {} indexing threads", writers.length);
+ final CountDownLatch stopLatch = new CountDownLatch(writers.length);
+ for (int i = 0; i < writers.length; i++) {
+ final int indexerId = i;
+ final Client client = client();
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ long id = idGenerator.incrementAndGet();
+ client.prepareIndex("test", "type1", Long.toString(id))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + id).map()).execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+ logger.info("**** done indexing thread {}", indexerId);
+ } catch (Throwable e) {
+ logger.warn("**** failed indexing thread {}", e, indexerId);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ logger.info("--> waiting for 2000 docs to be indexed ...");
+ waitForDocs(2000);
+ logger.info("--> 2000 docs indexed");
+
+        logger.info("--> flushing the index ...");
+ // now flush, just to make sure we have some data in the index, not just translog
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+
+ logger.info("--> waiting for 4000 docs to be indexed ...");
+ waitForDocs(4000);
+ logger.info("--> 4000 docs indexed");
+ logger.info("--> allow 4 nodes for index [test] ...");
+ allowNodes("test", 4);
+
+ logger.info("--> waiting for GREEN health status ...");
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForNodes(">=4").execute().actionGet().isTimedOut(), equalTo(false));
+
+
+ logger.info("--> waiting for 15000 docs to be indexed ...");
+ waitForDocs(15000);
+ logger.info("--> 15000 docs indexed");
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ stop.set(true);
+ stopLatch.await();
+ logger.info("--> indexing threads stopped");
+
+ logger.info("--> refreshing the index");
+ refreshAndAssert();
+ logger.info("--> verifying indexed content");
+ iterateAssertCount(5, indexCounter.get(), 10);
+ }
+
+ @Test @TestLogging("action.search.type:TRACE,action.admin.indices.refresh:TRACE")
+ @Slow
+ public void recoverWhileUnderLoadWithNodeShutdown() throws Exception {
+ logger.info("--> creating test index ...");
+ assertAcked(prepareCreate("test", 2));
+
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] writers = new Thread[5];
+ final CountDownLatch stopLatch = new CountDownLatch(writers.length);
+ logger.info("--> starting {} indexing threads", writers.length);
+ for (int i = 0; i < writers.length; i++) {
+ final int indexerId = i;
+ final Client client = client();
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ long id = idGenerator.incrementAndGet();
+ client.prepareIndex("test", "type1", Long.toString(id))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + id).map()).execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+ logger.info("**** done indexing thread {}", indexerId);
+ } catch (Throwable e) {
+ logger.warn("**** failed indexing thread {}", e, indexerId);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ logger.info("--> waiting for 2000 docs to be indexed ...");
+ waitForDocs(2000);
+ logger.info("--> 2000 docs indexed");
+
+        logger.info("--> flushing the index ...");
+ // now flush, just to make sure we have some data in the index, not just translog
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+
+ logger.info("--> waiting for 4000 docs to be indexed ...");
+ waitForDocs(4000);
+ logger.info("--> 4000 docs indexed");
+
+ // now start more nodes, while we index
+ logger.info("--> allow 4 nodes for index [test] ...");
+ allowNodes("test", 4);
+
+ logger.info("--> waiting for GREEN health status ...");
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForNodes(">=4").execute().actionGet().isTimedOut(), equalTo(false));
+
+
+        logger.info("--> waiting for 15000 docs to be indexed ...");
+        waitForDocs(15000);
+        logger.info("--> 15000 docs indexed");
+
+ // now, shutdown nodes
+ logger.info("--> allow 3 nodes for index [test] ...");
+ allowNodes("test", 3);
+ logger.info("--> waiting for GREEN health status ...");
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForNodes(">=3").execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("--> allow 2 nodes for index [test] ...");
+ allowNodes("test", 2);
+ logger.info("--> waiting for GREEN health status ...");
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForNodes(">=2").execute().actionGet().isTimedOut(), equalTo(false));
+
+        logger.info("--> allow 1 node for index [test] ...");
+ allowNodes("test", 1);
+ logger.info("--> waiting for YELLOW health status ...");
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForYellowStatus().setWaitForNodes(">=1").execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ stop.set(true);
+ stopLatch.await();
+ logger.info("--> indexing threads stopped");
+
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForYellowStatus().setWaitForNodes(">=1").execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("--> refreshing the index");
+ refreshAndAssert();
+ logger.info("--> verifying indexed content");
+ iterateAssertCount(5, indexCounter.get(), 10);
+ }
+
+ @Test
+ @TestLogging("action.search.type:TRACE,action.admin.indices.refresh:TRACE,action.index:TRACE,action.support.replication:TRACE,cluster.service:DEBUG")
+ @Slow
+ public void recoverWhileRelocating() throws Exception {
+ final int numShards = between(2, 10);
+ final int numReplicas = 0;
+ cluster().ensureAtLeastNumNodes(3);
+ logger.info("--> creating test index ...");
+ int allowNodes = 2;
+ assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put("number_of_shards", numShards).put("number_of_replicas", numReplicas).build()));
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] writers = new Thread[atLeast(3)];
+ final CountDownLatch stopLatch = new CountDownLatch(writers.length);
+ logger.info("--> starting {} indexing threads", writers.length);
+ final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
+ for (int i = 0; i < writers.length; i++) {
+ final int indexerId = i;
+ final Client client = client();
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+ long id = -1;
+ try {
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ id = idGenerator.incrementAndGet();
+ client.prepareIndex("test", "type1", Long.toString(id) + "-" + indexerId)
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + id).map()).execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+ logger.info("**** done indexing thread {}", indexerId);
+ } catch (Throwable e) {
+ failures.add(e);
+ logger.warn("**** failed indexing thread {} on doc id {}", e, indexerId, id);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ final int numDocs = between(10000, 50000);
+ for (int i = 0; i < numDocs; i += between(100, 1000)) {
+ assertThat(failures, emptyIterable());
+ logger.info("--> waiting for {} docs to be indexed ...", i);
+ waitForDocs(i);
+ logger.info("--> {} docs indexed", i);
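+            // toggle the number of allowed nodes between 1 and 2 (2/2 = 1, 2/1 = 2) to force shards to relocate back and forth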
+ allowNodes = 2 / allowNodes;
+ allowNodes("test", allowNodes);
+ logger.info("--> waiting for GREEN health status ...");
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().execute().actionGet().isTimedOut(), equalTo(false));
+ }
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ stop.set(true);
+ assertThat(failures, emptyIterable());
+ stopLatch.await();
+ logger.info("--> indexing threads stopped");
+ logger.info("--> bump up number of replicas to 1 and allow all nodes to hold the index");
+ allowNodes("test", 3);
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.settingsBuilder().put("number_of_replicas", 1)).get());
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("--> refreshing the index");
+ refreshAndAssert();
+ logger.info("--> verifying indexed content");
+ iterateAssertCount(numShards, indexCounter.get(), 10);
+ }
+
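+    /**
+     * Runs {@code iterations} count searches and asserts that each one returns exactly {@code numberOfDocs} hits,
+     * waiting for up to five minutes for the counts to converge if any iteration disagrees.
+     */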
+ private void iterateAssertCount(final int numberOfShards, final long numberOfDocs, final int iterations) throws Exception {
+ SearchResponse[] iterationResults = new SearchResponse[iterations];
+ boolean error = false;
+ for (int i = 0; i < iterations; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setSearchType(SearchType.COUNT).setQuery(matchAllQuery()).get();
+ logSearchResponse(numberOfShards, numberOfDocs, i, searchResponse);
+ iterationResults[i] = searchResponse;
+ if (searchResponse.getHits().totalHits() != numberOfDocs) {
+ error = true;
+ }
+ }
+
+ if (error) {
+            // print out each shard and its doc count
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().get();
+ for (ShardStats shardStats : indicesStatsResponse.getShards()) {
+ DocsStats docsStats = shardStats.getStats().docs;
+ logger.info("shard [{}] - count {}, primary {}", shardStats.getShardId(), docsStats.getCount(), shardStats.getShardRouting().primary());
+ }
+
+            // if there was an error, wait and see whether the counts converge over time
+ logger.info("--> trying to wait");
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ boolean error = false;
+ for (int i = 0; i < iterations; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setSearchType(SearchType.COUNT).setQuery(matchAllQuery()).get();
+ if (searchResponse.getHits().totalHits() != numberOfDocs) {
+ error = true;
+ }
+ }
+ return !error;
+ }
+ }, 5, TimeUnit.MINUTES), equalTo(true));
+ }
+
+        // let's now make the test fail if it was supposed to fail
+ for (int i = 0; i < iterations; i++) {
+ assertHitCount(iterationResults[i], numberOfDocs);
+ }
+ }
+
+ private void logSearchResponse(int numberOfShards, long numberOfDocs, int iteration, SearchResponse searchResponse) {
+ logger.info("iteration [{}] - successful shards: {} (expected {})", iteration, searchResponse.getSuccessfulShards(), numberOfShards);
+ logger.info("iteration [{}] - failed shards: {} (expected 0)", iteration, searchResponse.getFailedShards());
+ if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) {
+ logger.info("iteration [{}] - shard failures: {}", iteration, Arrays.toString(searchResponse.getShardFailures()));
+ }
+ logger.info("iteration [{}] - returned documents: {} (expected {})", iteration, searchResponse.getHits().totalHits(), numberOfDocs);
+ }
+
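+    /** Refreshes the index, waiting for up to five minutes until all shards acknowledge the refresh. */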
+ private void refreshAndAssert() throws InterruptedException {
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ try {
+ RefreshResponse actionGet = client().admin().indices().prepareRefresh().execute().actionGet();
+ assertNoFailures(actionGet);
+ return actionGet.getTotalShards() == actionGet.getSuccessfulShards();
+ } catch (Throwable e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }, 5, TimeUnit.MINUTES), equalTo(true));
+ }
+
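+    /** Blocks until more than {@code numDocs} documents are visible to search, failing if no progress is being made. */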
+ private void waitForDocs(final long numDocs) throws InterruptedException {
+ final long[] lastKnownCount = {-1};
+ long lastStartCount = -1;
+ Predicate<Object> testDocs = new Predicate<Object>() {
+ public boolean apply(Object o) {
+ lastKnownCount[0] = client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount();
+ logger.debug("[{}] docs visible for search. waiting for [{}]", lastKnownCount[0], numDocs);
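+                // note the strict comparison: we wait until the count exceeds numDocs, not just reaches it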
+ return lastKnownCount[0] > numDocs;
+ }
+ };
+ // 5 minutes seems like a long time but while relocating, indexing threads can wait for up to ~1m before retrying when
+ // they first try to index into a shard which is not STARTED.
+ while (!awaitBusy(testDocs, 5, TimeUnit.MINUTES)) {
+ if (lastStartCount == lastKnownCount[0]) {
+ // we didn't make any progress
+                fail("failed to reach " + numDocs + " docs");
+ }
+ lastStartCount = lastKnownCount[0];
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/RelocationTests.java b/src/test/java/org/elasticsearch/recovery/RelocationTests.java
new file mode 100644
index 0000000..c9c24e0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/RelocationTests.java
@@ -0,0 +1,419 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import com.carrotsearch.hppc.IntOpenHashSet;
+import com.carrotsearch.hppc.procedures.IntProcedure;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests primary and replica shard relocation, with and without concurrent indexing.
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=0)
+public class RelocationTests extends ElasticsearchIntegrationTest {
+    private static final TimeValue ACCEPTABLE_RELOCATION_TIME = new TimeValue(5, TimeUnit.MINUTES);
+
+
+ @Test
+ public void testSimpleRelocationNoIndexing() {
+ logger.info("--> starting [node1] ...");
+ final String node_1 = cluster().startNode();
+
+ logger.info("--> creating test index ...");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ )
+ .execute().actionGet();
+
+ logger.info("--> index 10 docs");
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+ logger.info("--> flush so we have an actual index");
+ client().admin().indices().prepareFlush().execute().actionGet();
+ logger.info("--> index more docs so we have something in the translog");
+ for (int i = 10; i < 20; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+
+ logger.info("--> verifying count");
+ client().admin().indices().prepareRefresh().execute().actionGet();
+        assertThat(client().prepareCount("test").execute().actionGet().getCount(), equalTo(20L));
+
+ logger.info("--> start another node");
+ final String node_2 = cluster().startNode();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> relocate the shard from node1 to node2");
+ client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2))
+ .execute().actionGet();
+
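+        // the health request is issued twice, presumably to catch relocations scheduled right after the first check returns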
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> verifying count again...");
+ client().admin().indices().prepareRefresh().execute().actionGet();
+        assertThat(client().prepareCount("test").execute().actionGet().getCount(), equalTo(20L));
+ }
+
+ @Test
+ @Slow
+ public void testPrimaryRelocationWhileIndexingRandom() throws Exception {
+ int numRelocations = atLeast(rarely() ? 3 : 1);
+ int numWriters = atLeast(rarely() ? 3 : 1);
+ boolean batch = getRandom().nextBoolean();
+        logger.info("testPrimaryRelocationWhileIndexingRandom(numRelocations={}, numWriters={}, batch={})",
+                numRelocations, numWriters, batch);
+ testPrimaryRelocationWhileIndexing(numRelocations, numWriters, batch);
+ }
+
+ private void testPrimaryRelocationWhileIndexing(final int numberOfRelocations, final int numberOfWriters, final boolean batch) throws Exception {
+ String[] nodes = new String[2];
+ logger.info("--> starting [node1] ...");
+ nodes[0] = cluster().startNode();
+
+ logger.info("--> creating test index ...");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).execute().actionGet();
+
+ logger.info("--> starting [node2] ...");
+ nodes[1] = cluster().startNode();
+
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] writers = new Thread[numberOfWriters];
+ final CountDownLatch stopLatch = new CountDownLatch(writers.length);
+
+ logger.info("--> starting {} indexing threads", writers.length);
+ for (int i = 0; i < writers.length; i++) {
+ final Client perThreadClient = client();
+ final int indexerId = i;
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ if (batch) {
+ BulkRequestBuilder bulkRequest = perThreadClient.prepareBulk();
+ for (int i = 0; i < 100; i++) {
+ long id = idGenerator.incrementAndGet();
+ if (id % 1000 == 0) {
+ perThreadClient.admin().indices().prepareFlush().execute().actionGet();
+ }
+ bulkRequest.add(perThreadClient.prepareIndex("test", "type1", Long.toString(id))
+ .setSource("test", "value" + id));
+ }
+ BulkResponse bulkResponse = bulkRequest.execute().actionGet();
+ for (BulkItemResponse bulkItemResponse : bulkResponse) {
+ if (!bulkItemResponse.isFailed()) {
+ indexCounter.incrementAndGet();
+ } else {
+ logger.warn("**** failed bulk indexing thread {}, {}/{}", indexerId, bulkItemResponse.getFailure().getId(), bulkItemResponse.getFailure().getMessage());
+ }
+ }
+ } else {
+ long id = idGenerator.incrementAndGet();
+ if (id % 1000 == 0) {
+ perThreadClient.admin().indices().prepareFlush().execute().actionGet();
+ }
+ perThreadClient.prepareIndex("test", "type1", Long.toString(id))
+ .setSource("test", "value" + id).execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+ }
+ logger.info("**** done indexing thread {}", indexerId);
+ } catch (Exception e) {
+ logger.warn("**** failed indexing thread {}", e, indexerId);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ logger.info("--> waiting for 2000 docs to be indexed ...");
+ while (client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount() < 2000) {
+ Thread.sleep(100);
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ }
+ logger.info("--> 2000 docs indexed");
+
+ logger.info("--> starting relocations...");
+ for (int i = 0; i < numberOfRelocations; i++) {
+ int fromNode = (i % 2);
+ int toNode = fromNode == 0 ? 1 : 0;
+ logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
+ client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId("test", 0), nodes[fromNode], nodes[toNode]))
+ .execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+            logger.info("--> DONE relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
+ }
+ logger.info("--> done relocations");
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ stop.set(true);
+ stopLatch.await();
+ logger.info("--> indexing threads stopped");
+
+ logger.info("--> refreshing the index");
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ logger.info("--> searching the index");
+ boolean ranOnce = false;
+ for (int i = 0; i < 10; i++) {
+ try {
+ logger.info("--> START search test round {}", i + 1);
+ SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery()).setSize((int) indexCounter.get()).setNoFields().execute().actionGet().getHits();
+ ranOnce = true;
+ if (hits.totalHits() != indexCounter.get()) {
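+                    // on a mismatch, diff the returned hit ids against the expected range 1..indexCounter
+                    // and log every missing or unexpected id before failing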
+ int[] hitIds = new int[(int) indexCounter.get()];
+ for (int hit = 0; hit < indexCounter.get(); hit++) {
+ hitIds[hit] = hit + 1;
+ }
+ IntOpenHashSet set = IntOpenHashSet.from(hitIds);
+ for (SearchHit hit : hits.hits()) {
+ int id = Integer.parseInt(hit.id());
+ if (!set.remove(id)) {
+ logger.error("Extra id [{}]", id);
+ }
+ }
+ set.forEach(new IntProcedure() {
+
+ @Override
+ public void apply(int value) {
+ logger.error("Missing id [{}]", value);
+ }
+
+ });
+ }
+ assertThat(hits.totalHits(), equalTo(indexCounter.get()));
+ logger.info("--> DONE search test round {}", i + 1);
+ } catch (SearchPhaseExecutionException ex) {
+                // TODO: the first round can fail with this exception; is waiting for relocating shards to reach 0 not enough?
+ logger.warn("Got exception while searching.", ex);
+ }
+ }
+ if (!ranOnce) {
+ fail();
+ }
+ }
+
+ @Test
+ @Slow
+ public void testReplicaRelocationWhileIndexingRandom() throws Exception {
+ int numRelocations = atLeast(rarely() ? 3 : 1);
+ int numWriters = atLeast(rarely() ? 3 : 1);
+ boolean batch = getRandom().nextBoolean();
+        logger.info("testReplicaRelocationWhileIndexing(numRelocations={}, numWriters={}, batch={})", numRelocations, numWriters, batch);
+ testReplicaRelocationWhileIndexing(numRelocations, numWriters, batch);
+ }
+
+ private void testReplicaRelocationWhileIndexing(final int numberOfRelocations, final int numberOfWriters, final boolean batch) throws Exception {
+ logger.info("--> starting [node1] ...");
+ String[] nodes = new String[3];
+ nodes[0] = cluster().startNode();
+
+ logger.info("--> creating test index ...");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 1)
+ ).execute().actionGet();
+
+ logger.info("--> starting [node2] ...");
+ nodes[1] = cluster().startNode();
+
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> starting [node3] ...");
+ nodes[2] = cluster().startNode();
+
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] writers = new Thread[numberOfWriters];
+ final CountDownLatch stopLatch = new CountDownLatch(writers.length);
+
+ logger.info("--> starting {} indexing threads", writers.length);
+ for (int i = 0; i < writers.length; i++) {
+ final Client perThreadClient = client();
+ final int indexerId = i;
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ if (batch) {
+ BulkRequestBuilder bulkRequest = perThreadClient.prepareBulk();
+ for (int i = 0; i < 100; i++) {
+ long id = idGenerator.incrementAndGet();
+ if (id % 1000 == 0) {
+ perThreadClient.admin().indices().prepareFlush().execute().actionGet();
+ }
+ bulkRequest.add(perThreadClient.prepareIndex("test", "type1", Long.toString(id))
+ .setSource("test", "value" + id));
+ }
+ BulkResponse bulkResponse = bulkRequest.execute().actionGet();
+ for (BulkItemResponse bulkItemResponse : bulkResponse) {
+ if (!bulkItemResponse.isFailed()) {
+ indexCounter.incrementAndGet();
+ } else {
+ logger.warn("**** failed bulk indexing thread {}, {}/{}", indexerId, bulkItemResponse.getFailure().getId(), bulkItemResponse.getFailure().getMessage());
+ }
+ }
+ } else {
+ long id = idGenerator.incrementAndGet();
+ if (id % 1000 == 0) {
+ perThreadClient.admin().indices().prepareFlush().execute().actionGet();
+ }
+ perThreadClient.prepareIndex("test", "type1", Long.toString(id))
+ .setSource("test", "value" + id).execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+ }
+ logger.info("**** done indexing thread {}", indexerId);
+ } catch (Exception e) {
+ logger.warn("**** failed indexing thread {}", e, indexerId);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ logger.info("--> waiting for 2000 docs to be indexed ...");
+ while (client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount() < 2000) {
+ Thread.sleep(100);
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ }
+ logger.info("--> 2000 docs indexed");
+
+ logger.info("--> starting relocations...");
+ for (int i = 0; i < numberOfRelocations; i++) {
+ int fromNode = (1 + (i % 2));
+ int toNode = fromNode == 1 ? 2 : 1;
+ logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
+ client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId("test", 0), nodes[fromNode], nodes[toNode]))
+ .execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+            logger.info("--> DONE relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
+ }
+ logger.info("--> done relocations");
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ stop.set(true);
+ stopLatch.await();
+ logger.info("--> indexing threads stopped");
+
+ logger.info("--> refreshing the index");
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ logger.info("--> searching the index");
+ boolean ranOnce = false;
+ for (int i = 0; i < 10; i++) {
+ try {
+ logger.info("--> START search test round {}", i + 1);
+ SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery()).setSize((int) indexCounter.get()).setNoFields().execute().actionGet().getHits();
+ ranOnce = true;
+ if (hits.totalHits() != indexCounter.get()) {
+ int[] hitIds = new int[(int) indexCounter.get()];
+ for (int hit = 0; hit < indexCounter.get(); hit++) {
+ hitIds[hit] = hit + 1;
+ }
+ IntOpenHashSet set = IntOpenHashSet.from(hitIds);
+ for (SearchHit hit : hits.hits()) {
+ int id = Integer.parseInt(hit.id());
+ if (!set.remove(id)) {
+ logger.error("Extra id [{}]", id);
+ }
+ }
+ set.forEach(new IntProcedure() {
+
+ @Override
+ public void apply(int value) {
+ logger.error("Missing id [{}]", value);
+ }
+ });
+ }
+ assertThat(hits.totalHits(), equalTo(indexCounter.get()));
+ logger.info("--> DONE search test round {}", i + 1);
+ } catch (SearchPhaseExecutionException ex) {
+                // TODO: the first round can fail with this exception; is waiting for relocating shards to reach 0 not enough?
+ logger.warn("Got exception while searching.", ex);
+ }
+ }
+ if (!ranOnce) {
+ fail();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/SimpleRecoveryTests.java b/src/test/java/org/elasticsearch/recovery/SimpleRecoveryTests.java
new file mode 100644
index 0000000..fee8e96
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/SimpleRecoveryTests.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Basic recovery tests: index a few documents, add nodes one at a time, and verify
+ * that the documents remain retrievable after each recovery.
+ */
+public class SimpleRecoveryTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return recoverySettings();
+ }
+
+ protected Settings recoverySettings() {
+ return ImmutableSettings.Builder.EMPTY_SETTINGS;
+ }
+
+ @Test
+ public void testSimpleRecovery() throws Exception {
+ prepareCreate("test", 1).execute().actionGet(5000);
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus()).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ FlushResponse flushResponse = client().admin().indices().flush(flushRequest("test")).actionGet();
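+        // the index has 5 primaries with 1 replica each (10 shard copies in total), but only the
+        // 5 primaries are active while a single node holds the index, as the assertions below reflect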
+ assertThat(flushResponse.getTotalShards(), equalTo(10));
+ assertThat(flushResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(flushResponse.getFailedShards(), equalTo(0));
+ client().index(indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet();
+ RefreshResponse refreshResponse = client().admin().indices().refresh(refreshRequest("test")).actionGet();
+ assertThat(refreshResponse.getTotalShards(), equalTo(10));
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(5));
+ assertThat(refreshResponse.getFailedShards(), equalTo(0));
+
+ allowNodes("test", 2);
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().local(true).waitForNodes(">=2")).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ GetResponse getResult;
+
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1").operationThreaded(false)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("1").operationThreaded(false)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ }
+
+ // now start another one so we move some primaries
+ allowNodes("test", 3);
+ Thread.sleep(200);
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForRelocatingShards(0).waitForNodes(">=3")).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet(1000);
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ }
+ }
+
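+    // note: the unquoted "type1" key below relies on the lenient JSON parsing that Elasticsearch enables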
+ private String source(String id, String nameValue) {
+ return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/SmallFileChunkSizeRecoveryTests.java b/src/test/java/org/elasticsearch/recovery/SmallFileChunkSizeRecoveryTests.java
new file mode 100644
index 0000000..a69abf4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/SmallFileChunkSizeRecoveryTests.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Runs {@link SimpleRecoveryTests} with a tiny recovery file chunk size (3 bytes) to exercise chunked file transfer.
+ */
+public class SmallFileChunkSizeRecoveryTests extends SimpleRecoveryTests {
+
+ @Override
+ protected Settings recoverySettings() {
+ return ImmutableSettings.settingsBuilder().put("index.shard.recovery.file_chunk_size", "3b").build();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/SmallTranslogOpsRecoveryTests.java b/src/test/java/org/elasticsearch/recovery/SmallTranslogOpsRecoveryTests.java
new file mode 100644
index 0000000..7ddabae
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/SmallTranslogOpsRecoveryTests.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Runs {@link SimpleRecoveryTests} replaying the translog one operation at a time during recovery.
+ */
+public class SmallTranslogOpsRecoveryTests extends SimpleRecoveryTests {
+
+ @Override
+ protected Settings recoverySettings() {
+ return ImmutableSettings.settingsBuilder().put("index.shard.recovery.translog_ops", 1).build();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/recovery/SmallTranslogSizeRecoveryTests.java b/src/test/java/org/elasticsearch/recovery/SmallTranslogSizeRecoveryTests.java
new file mode 100644
index 0000000..d74940c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/recovery/SmallTranslogSizeRecoveryTests.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Runs {@link SimpleRecoveryTests} with a tiny translog batch size (3 bytes) during recovery.
+ */
+public class SmallTranslogSizeRecoveryTests extends SimpleRecoveryTests {
+
+ @Override
+ protected Settings recoverySettings() {
+ return ImmutableSettings.settingsBuilder().put("index.shard.recovery.translog_size", "3b").build();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/rest/helper/HttpClient.java b/src/test/java/org/elasticsearch/rest/helper/HttpClient.java
new file mode 100644
index 0000000..4ec352d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/rest/helper/HttpClient.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.helper;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.List;
+import java.util.Map;
+
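+/**
+ * Minimal HTTP helper for REST tests, built on {@link HttpURLConnection}.
+ * A usage sketch (the host and port below are illustrative assumptions, not
+ * something this class provides):
+ * <pre>
+ * HttpClient client = new HttpClient("http://localhost:9200/");
+ * HttpClientResponse resp = client.request("GET", "_cluster/health");
+ * assert resp.errorCode() == 200;
+ * </pre>
+ */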
+public class HttpClient {
+
+ private final URL baseUrl;
+
+ public HttpClient(TransportAddress transportAddress) {
+ InetSocketAddress address = ((InetSocketTransportAddress) transportAddress).address();
+ try {
+ baseUrl = new URL("http", address.getAddress().getHostAddress(), address.getPort(), "/");
+ } catch (MalformedURLException e) {
+ throw new ElasticsearchException("failed to build base url from transport address [" + transportAddress + "]", e);
+ }
+ }
+
+ public HttpClient(String url) {
+ try {
+ baseUrl = new URL(url);
+ } catch (MalformedURLException e) {
+ throw new ElasticsearchException("malformed url [" + url + "]", e);
+ }
+ }
+
+ public HttpClient(URL url) {
+ baseUrl = url;
+ }
+
+ public HttpClientResponse request(String path) {
+ return request("GET", path);
+ }
+
+ public HttpClientResponse request(String method, String path) {
+ return request(method, path, null);
+ }
+
+ public HttpClientResponse request(String method, String path, Map<String, String> headers) {
+ URL url;
+ try {
+ url = new URL(baseUrl, path);
+ } catch (MalformedURLException e) {
+ throw new ElasticsearchException("Cannot parse " + path, e);
+ }
+
+ HttpURLConnection urlConnection;
+ try {
+ urlConnection = (HttpURLConnection) url.openConnection();
+ urlConnection.setRequestMethod(method);
+ if (headers != null) {
+ for (Map.Entry<String, String> headerEntry : headers.entrySet()) {
+ urlConnection.setRequestProperty(headerEntry.getKey(), headerEntry.getValue());
+ }
+ }
+ urlConnection.connect();
+ } catch (IOException e) {
+ throw new ElasticsearchException("failed to connect to [" + url + "]", e);
+ }
+
+ int errorCode = -1;
+ Map<String, List<String>> respHeaders = null;
+ try {
+ errorCode = urlConnection.getResponseCode();
+ respHeaders = urlConnection.getHeaderFields();
+ InputStream inputStream = urlConnection.getInputStream();
+ String body = null;
+ try {
+ body = Streams.copyToString(new InputStreamReader(inputStream, Charsets.UTF_8));
+ } catch (IOException e1) {
+ throw new ElasticsearchException("problem reading response stream", e1);
+ }
+ return new HttpClientResponse(body, errorCode, respHeaders, null);
+ } catch (IOException e) {
+ InputStream errStream = urlConnection.getErrorStream();
+ String body = null;
+ if (errStream != null) {
+ try {
+ body = Streams.copyToString(new InputStreamReader(errStream, Charsets.UTF_8));
+ } catch (IOException e1) {
+ throw new ElasticsearchException("problem reading error stream", e1);
+ }
+ }
+ return new HttpClientResponse(body, errorCode, respHeaders, e);
+ } finally {
+ urlConnection.disconnect();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/rest/helper/HttpClientResponse.java b/src/test/java/org/elasticsearch/rest/helper/HttpClientResponse.java
new file mode 100644
index 0000000..2abc6b6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/rest/helper/HttpClientResponse.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest.helper;
+
+import java.util.List;
+import java.util.Map;
+
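+/**
+ * Holder for an HTTP response: body, status code, response headers, and the exception
+ * (if any) thrown while the response was read.
+ */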
+public class HttpClientResponse {
+ private final String response;
+ private final int errorCode;
+ private final Map<String, List<String>> headers;
+ private final Throwable e;
+
+ public HttpClientResponse(String response, int errorCode, Map<String, List<String>> headers, Throwable e) {
+ this.response = response;
+ this.errorCode = errorCode;
+ this.headers = headers;
+ this.e = e;
+ }
+
+ public String response() {
+ return response;
+ }
+
+ public int errorCode() {
+ return errorCode;
+ }
+
+ public Throwable cause() {
+ return e;
+ }
+
+ public Map<String, List<String>> getHeaders() {
+ return headers;
+ }
+
+ public String getHeader(String name) {
+ if (headers == null) {
+ return null;
+ }
+ List<String> vals = headers.get(name);
+ if (vals == null || vals.isEmpty()) {
+ return null;
+ }
+ return vals.iterator().next();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/rest/util/RestUtilsTests.java b/src/test/java/org/elasticsearch/rest/util/RestUtilsTests.java
new file mode 100644
index 0000000..b676ff3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/rest/util/RestUtilsTests.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.util;
+
+import org.elasticsearch.rest.support.RestUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link RestUtils#decodeQueryString}: a bare key decodes to an empty value,
+ * while empty keys and dangling separators are ignored.
+ */
+public class RestUtilsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDecodeQueryString() {
+ Map<String, String> params = newHashMap();
+
+ String uri = "something?test=value";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(1));
+ assertThat(params.get("test"), equalTo("value"));
+
+ params.clear();
+ uri = "something?test=value&test1=value1";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(2));
+ assertThat(params.get("test"), equalTo("value"));
+ assertThat(params.get("test1"), equalTo("value1"));
+
+ params.clear();
+ uri = "something";
+ RestUtils.decodeQueryString(uri, uri.length(), params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something";
+ RestUtils.decodeQueryString(uri, -1, params);
+ assertThat(params.size(), equalTo(0));
+ }
+
+ @Test
+ public void testDecodeQueryStringEdgeCases() {
+ Map<String, String> params = newHashMap();
+
+ String uri = "something?";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something?&";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something?p=v&&p1=v1";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(2));
+ assertThat(params.get("p"), equalTo("v"));
+ assertThat(params.get("p1"), equalTo("v1"));
+
+ params.clear();
+ uri = "something?=";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something?&=";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something?a";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(1));
+ assertThat(params.get("a"), equalTo(""));
+
+ params.clear();
+ uri = "something?p=v&a";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(2));
+ assertThat(params.get("a"), equalTo(""));
+ assertThat(params.get("p"), equalTo("v"));
+
+ params.clear();
+ uri = "something?p=v&a&p1=v1";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(3));
+ assertThat(params.get("a"), equalTo(""));
+ assertThat(params.get("p"), equalTo("v"));
+ assertThat(params.get("p1"), equalTo("v1"));
+
+ params.clear();
+ uri = "something?p=v&a&b&p1=v1";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(4));
+ assertThat(params.get("a"), equalTo(""));
+ assertThat(params.get("b"), equalTo(""));
+ assertThat(params.get("p"), equalTo("v"));
+ assertThat(params.get("p1"), equalTo("v1"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/river/RiverTests.java b/src/test/java/org/elasticsearch/river/RiverTests.java
new file mode 100644
index 0000000..807d2dd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/river/RiverTests.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.get.MultiGetItemResponse;
+import org.elasticsearch.action.get.MultiGetRequestBuilder;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.river.dummy.DummyRiverModule;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+public class RiverTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testRiverStart() throws Exception {
+ startAndCheckRiverIsStarted("dummy-river-test");
+ }
+
+ @Test
+ public void testMultipleRiversStart() throws Exception {
+ int nbRivers = between(2, 10);
+ logger.info("--> testing with {} rivers...", nbRivers);
+ Thread[] riverCreators = new Thread[nbRivers];
+ final CountDownLatch latch = new CountDownLatch(nbRivers);
+ final MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet();
+ for (int i = 0; i < nbRivers; i++) {
+ final String riverName = "dummy-river-test-" + i;
+ riverCreators[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ startRiver(riverName);
+ } catch (Throwable t) {
+ logger.warn("failed to register river {}", t, riverName);
+ } finally {
+ latch.countDown();
+ }
+ }
+ };
+ riverCreators[i].start();
+ multiGetRequestBuilder.add(RiverIndexName.Conf.DEFAULT_INDEX_NAME, riverName, "_status");
+ }
+
+ latch.await();
+
+ logger.info("--> checking that all rivers were created");
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ MultiGetResponse multiGetItemResponse = multiGetRequestBuilder.get();
+ for (MultiGetItemResponse getItemResponse : multiGetItemResponse) {
+ if (getItemResponse.isFailed() || !getItemResponse.getResponse().isExists()) {
+ return false;
+ }
+ }
+ return true;
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ /**
+ * Test case for https://github.com/elasticsearch/elasticsearch/issues/4577
+ * River does not start when using config/templates files
+ */
+ @Test
+ public void startDummyRiverWithDefaultTemplate() throws Exception {
+ logger.info("--> create empty template");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("*")
+ .setOrder(0)
+ .addMapping(MapperService.DEFAULT_MAPPING,
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .endObject().endObject())
+ .get();
+
+ startAndCheckRiverIsStarted("dummy-river-default-template-test");
+ }
+
+ /**
+ * Test case for https://github.com/elasticsearch/elasticsearch/issues/4577
+ * River does not start when using config/templates files
+ */
+ @Test
+ public void startDummyRiverWithSomeTemplates() throws Exception {
+ logger.info("--> create some templates");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("*")
+ .setOrder(0)
+ .addMapping(MapperService.DEFAULT_MAPPING,
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .endObject().endObject())
+ .get();
+ client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("*")
+ .setOrder(0)
+ .addMapping("atype",
+ JsonXContent.contentBuilder().startObject().startObject("atype")
+ .endObject().endObject())
+ .get();
+
+ startAndCheckRiverIsStarted("dummy-river-template-test");
+ }
+
+ /**
+ * Creates a dummy river and checks that it has started, failing if it does not start within 5 seconds.
+ * @param riverName name of the dummy river to start
+ */
+ private void startAndCheckRiverIsStarted(final String riverName) throws InterruptedException {
+ startRiver(riverName);
+ checkRiverIsStarted(riverName);
+ }
+
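+ // Registering a river is just an index request on the "_river" index; the REST
+ // equivalent (a sketch) is: PUT /_river/{riverName}/_meta {"type": "<river module class>"}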
+ private void startRiver(final String riverName) {
+ logger.info("--> starting river [{}]", riverName);
+ IndexResponse indexResponse = client().prepareIndex(RiverIndexName.Conf.DEFAULT_INDEX_NAME, riverName, "_meta")
+ .setSource("type", DummyRiverModule.class.getCanonicalName()).get();
+ assertTrue(indexResponse.isCreated());
+ }
+
+ private void checkRiverIsStarted(final String riverName) throws InterruptedException {
+ logger.info("--> checking that river [{}] was created", riverName);
+ assertThat(awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object obj) {
+ GetResponse response = client().prepareGet(RiverIndexName.Conf.DEFAULT_INDEX_NAME, riverName, "_status").get();
+ return response.isExists();
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java b/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java
new file mode 100644
index 0000000..861fdf7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.routing;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Map;
+import java.util.Set;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static com.google.common.collect.Sets.newHashSet;
+import static org.elasticsearch.cluster.metadata.AliasAction.newAddAliasAction;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests resolution of index and search routing values through aliases with configured routing.
+ */
+public class AliasResolveRoutingTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testResolveIndexRouting() throws Exception {
+ createIndex("test1");
+ createIndex("test2");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias10").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias110").searchRouting("1,0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias12").routing("2")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias20").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias21").routing("1")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias0").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias0").routing("0")).execute().actionGet();
+
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "test1"), nullValue());
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias"), nullValue());
+
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "test1"), nullValue());
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias10"), equalTo("0"));
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias20"), equalTo("0"));
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias21"), equalTo("1"));
+ assertThat(clusterService().state().metaData().resolveIndexRouting("3", "test1"), equalTo("3"));
+ assertThat(clusterService().state().metaData().resolveIndexRouting("0", "alias10"), equalTo("0"));
+ try {
+ clusterService().state().metaData().resolveIndexRouting("1", "alias10");
+ fail("should fail");
+ } catch (ElasticsearchIllegalArgumentException e) {
+ // all is well, we can't have two routing values, one provided in the request and one configured on the alias
+ }
+
+ try {
+ clusterService().state().metaData().resolveIndexRouting(null, "alias0");
+ fail("should fail");
+ } catch (ElasticsearchIllegalArgumentException ex) {
+ // Expected
+ }
+ }
+
+
+ @Test
+ public void testResolveSearchRouting() throws Exception {
+ createIndex("test1");
+ createIndex("test2");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias10").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias20").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias21").routing("1")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias0").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias0").routing("0")).execute().actionGet();
+
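+ // resolveSearchRouting returns a map from concrete index to the set of routing values to
+ // search on; a null result means no routing restriction, i.e. search all shards.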
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias"), nullValue());
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0,1", "alias"), equalTo(newMap("test1", newSet("0", "1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias10"), equalTo(newMap("test1", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias10"), equalTo(newMap("test1", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0", "alias10"), equalTo(newMap("test1", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("1", "alias10"), nullValue());
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias0"), equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
+
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias10", "alias20"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias10", "alias21"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias20", "alias21"}),
+ equalTo(newMap("test2", newSet("0", "1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"test1", "alias10"}), nullValue());
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias10", "test1"}), nullValue());
+
+
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0", new String[]{"alias10", "alias20"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0,1", new String[]{"alias10", "alias20"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("1", new String[]{"alias10", "alias20"}), nullValue());
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0", new String[]{"alias10", "alias21"}),
+ equalTo(newMap("test1", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("1", new String[]{"alias10", "alias21"}),
+ equalTo(newMap("test2", newSet("1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0,1,2", new String[]{"alias10", "alias21"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0,1,2", new String[]{"test1", "alias10", "alias21"}),
+ equalTo(newMap("test1", newSet("0", "1", "2"), "test2", newSet("1"))));
+ }
+
+ private <T> Set<T> newSet(T... elements) {
+ return newHashSet(elements);
+ }
+
+
+ private <K, V> Map<K, V> newMap(K key, V value) {
+ Map<K, V> r = newHashMap();
+ r.put(key, value);
+ return r;
+ }
+
+ private <K, V> Map<K, V> newMap(K key1, V value1, K key2, V value2) {
+ Map<K, V> r = newHashMap();
+ r.put(key1, value1);
+ r.put(key2, value2);
+ return r;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java b/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java
new file mode 100644
index 0000000..adb815c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java
@@ -0,0 +1,434 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.routing;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.RoutingMissingException;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.metadata.AliasAction.newAddAliasAction;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+/**
+ * Tests CRUD and search routing through aliases with index and search routing configured.
+ */
+public class AliasRoutingTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testAliasCrudRouting() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ IndicesAliasesResponse res = admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test", "alias0").routing("0")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
+
+ logger.info("--> indexing with id [1], and routing [0] using alias");
+ client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> verifying get with routing alias, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> updating with id [1] and routing through alias");
+ client().prepareUpdate("alias0", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+ .setScript("ctx._source.field = 'value2'")
+ .execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().getSourceAsMap().get("field").toString(), equalTo("value2"));
+ }
+
+
+ logger.info("--> deleting with no routing, should not delete anything");
+ client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting with routing alias, should delete");
+ client().prepareDelete("alias0", "type1", "1").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+
+ logger.info("--> indexing with id [1], and routing [0] using alias");
+ client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting_by_query with 1 as routing, should not delete anything");
+ client().prepareDeleteByQuery().setQuery(matchAllQuery()).setRouting("1").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting_by_query with alias0, should delete");
+ client().prepareDeleteByQuery("alias0").setQuery(matchAllQuery()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testAliasSearchRouting() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ IndicesAliasesResponse res = admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test", "alias"))
+ .addAliasAction(newAddAliasAction("test", "alias0").routing("0"))
+ .addAliasAction(newAddAliasAction("test", "alias1").routing("1"))
+ .addAliasAction(newAddAliasAction("test", "alias01").searchRouting("0,1")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
+
+ logger.info("--> indexing with id [1], and routing [0] using alias");
+ client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> search with no routing, should fine one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ }
+
+ logger.info("--> search with wrong routing, should not find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ assertThat(client().prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ }
+
+ logger.info("--> search with correct routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ assertThat(client().prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> indexing with id [2], and routing [1] using alias");
+ client().prepareIndex("alias1", "type1", "2").setSource("field", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> search with no routing, should fine two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with 0 routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ assertThat(client().prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> search with 1 routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ assertThat(client().prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> search with 0,1 routings , should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ assertThat(client().prepareSearch("alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with two routing aliases , should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with alias0, alias1 and alias01, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with test, alias0 and alias1, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ }
+
+ @Test
+ public void testAliasSearchRoutingWithTwoIndices() throws Exception {
+ createIndex("test-a");
+ createIndex("test-b");
+ ensureGreen();
+ IndicesAliasesResponse res = admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test-a", "alias-a0").routing("0"))
+ .addAliasAction(newAddAliasAction("test-a", "alias-a1").routing("1"))
+ .addAliasAction(newAddAliasAction("test-b", "alias-b0").routing("0"))
+ .addAliasAction(newAddAliasAction("test-b", "alias-b1").routing("1"))
+ .addAliasAction(newAddAliasAction("test-a", "alias-ab").searchRouting("0"))
+ .addAliasAction(newAddAliasAction("test-b", "alias-ab").searchRouting("1")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
+ ensureGreen(); // wait for events again to make sure we got the aliases on all nodes
+ logger.info("--> indexing with id [1], and routing [0] using alias to test-a");
+ client().prepareIndex("alias-a0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test-a", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias-a0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> indexing with id [0], and routing [1] using alias to test-b");
+ client().prepareIndex("alias-b1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test-a", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias-b1", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+
+ logger.info("--> search with alias-a1,alias-b0, should not find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ }
+
+ logger.info("--> search with alias-ab, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias-ab").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias-ab").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with alias-a0,alias-b1 should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+ }
+
+ /*
+ See https://github.com/elasticsearch/elasticsearch/issues/2682
+ When searching on more than one index, if one of them was an alias with configured routing, the shards belonging
+ to the other indices (without routing) were not taken into account in PlainOperationRouting#searchShards.
+ That reduced the number of shards the search was executed on, so some documents were missing from the search results.
+ */
+ @Test
+ public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue2682() throws Exception {
+ createIndex("index", "index_2");
+ ensureGreen();
+ IndicesAliasesResponse res = admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("index", "index_1").routing("1")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
+
+ logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
+ client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> indexing on index_2 which is a concrete index");
+ client().prepareIndex("index_2", "type2", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+
+
+ logger.info("--> search all on index_* should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("index_*").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ }
+ }
+
+ /*
+ See https://github.com/elasticsearch/elasticsearch/pull/3268
+ When searching on more than one index, if one of them was an alias with configured routing, the shards belonging
+ to the other indices (without routing) were not taken into account in PlainOperationRouting#searchShardsCount.
+ That could make the count return 1, which forced the QUERY_AND_FETCH search type. As a result,
+ (size * number of shards hit) results were returned and no reduce phase took place.
+ */
+ @Test
+ public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue3268() throws Exception {
+ createIndex("index", "index_2");
+ ensureGreen();
+ IndicesAliasesResponse res = admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("index", "index_1").routing("1")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
+
+ logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
+ client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> indexing on index_2 which is a concrete index");
+ client().prepareIndex("index_2", "type2", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("index_*").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+
+ logger.info("--> search all on index_* should find two");
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2L));
+ //Let's make sure that, even though 2 docs are available, only one is returned according to the size we set in the request
+ //Therefore the reduce phase has taken place, which proves that the QUERY_AND_FETCH search type wasn't erroneously forced.
+ assertThat(searchResponse.getHits().getHits().length, equalTo(1));
+ }
+
+ @Test
+ public void testRequiredRoutingMappingWithAlias() throws Exception {
+ prepareCreate("test").addMapping(
+ "type1",
+ XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true)
+ .endObject().endObject().endObject()).get();
+ ensureGreen();
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+
+ logger.info("--> indexing with id [1], with no routing, should fail");
+ try {
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ fail();
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class));
+ }
+
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting with no routing, should broadcast the delete since _routing is required");
+ client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet("test", "type1", "1").get();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+
+ logger.info("--> bulk deleting with no routing, should broadcast the delete since _routing is required");
+ client().prepareBulk().add(Requests.deleteRequest("test").type("type1").id("1")).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet("test", "type1", "1").execute().actionGet();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testIndexingAliasesOverTime() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ logger.info("--> creating alias with routing [3]");
+ IndicesAliasesResponse res = admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test", "alias").routing("3")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
+
+ logger.info("--> indexing with id [0], and routing [3]");
+ client().prepareIndex("alias", "type1", "0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+
+ logger.info("--> verifying get and search with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> creating alias with routing [4]");
+ res = admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test", "alias").routing("4")).get();
+ assertThat(res.isAcknowledged(), equalTo(true));
+
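+ // The alias now routes to routing value "4" only, so the document indexed earlier with
+ // routing "3" is no longer visible through the alias.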
+ logger.info("--> verifying search with wrong routing should not find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ }
+
+ logger.info("--> creating alias with search routing [3,4] and index routing 4");
+ client().admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test", "alias").searchRouting("3,4").indexRouting("4"))
+ .execute().actionGet();
+
+ logger.info("--> indexing with id [1], and routing [4]");
+ client().prepareIndex("alias", "type1", "1").setSource("field", "value2").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+
+ logger.info("--> verifying get and search with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("4").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java b/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java
new file mode 100644
index 0000000..338e7e1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java
@@ -0,0 +1,440 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.routing;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.RoutingMissingException;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.action.termvector.MultiTermVectorsResponse;
+import org.elasticsearch.action.termvector.TermVectorRequest;
+import org.elasticsearch.action.termvector.TermVectorResponse;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests document and search routing on a concrete index, without aliases.
+ */
+public class SimpleRoutingTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void testSimpleCrudRouting() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting with no routing, should not delete anything");
+ client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting with routing, should delete");
+ client().prepareDelete("test", "type1", "1").setRouting("0").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting_by_query with 1 as routing, should not delete anything");
+ client().prepareDeleteByQuery().setQuery(matchAllQuery()).setRouting("1").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting_by_query with , should delete");
+ client().prepareDeleteByQuery().setQuery(matchAllQuery()).setRouting("0").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testSimpleSearchRouting() {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> search with no routing, should fine one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ }
+
+ logger.info("--> search with wrong routing, should not find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ }
+
+ logger.info("--> search with correct routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> indexing with id [2], and routing [1]");
+ client().prepareIndex("test", "type1", "2").setRouting("1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> search with no routing, should fine two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with 0 routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> search with 1 routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> search with 0,1 routings , should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with 0,1,0 routings , should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0", "1", "0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setRouting("0", "1", "0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+ }
+
+ @Test
+ public void testRequiredRoutingMapping() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true).endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should fail");
+
+ logger.info("--> indexing with id [1], with no routing, should fail");
+ try {
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ fail();
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class));
+ }
+
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting with no routing, should broadcast the delete since _routing is required");
+ client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet("test", "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+
+ logger.info("--> bulk deleting with no routing, should broadcast the delete since _routing is required");
+ client().prepareBulk().add(Requests.deleteRequest("test").type("type1").id("1")).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet("test", "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testRequiredRoutingWithPathMapping() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_routing").field("required", true).field("path", "routing_field").endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1", "routing_field", "0").setRefresh(true).execute().actionGet();
+
+ logger.info("--> check failure with different routing");
+ try {
+ client().prepareIndex("test", "type1", "1").setRouting("1").setSource("field", "value1", "routing_field", "0").setRefresh(true).execute().actionGet();
+ fail();
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(MapperParsingException.class));
+ }
+
+
+ logger.info("--> verifying get with no routing, should fail");
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet("test", "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testRequiredRoutingWithPathMappingBulk() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_routing").field("required", true).field("path", "routing_field").endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareBulk().add(
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1", "routing_field", "0")).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("--> verifying get with no routing, should fail");
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet("test", "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testRequiredRoutingWithPathNumericType() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_routing").field("required", true).field("path", "routing_field").endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1", "routing_field", 0).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("--> verifying get with no routing, should fail");
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet("test", "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testRequiredRoutingMapping_variousAPIs() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true).endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").get();
+ logger.info("--> indexing with id [2], and routing [0]");
+ client().prepareIndex("test", "type1", "2").setRouting("0").setSource("field", "value2").setRefresh(true).get();
+
+ logger.info("--> verifying get with id [1] with routing [0], should succeed");
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+
+ logger.info("--> verifying get with id [1], with no routing, should fail");
+ try {
+ client().prepareGet("test", "type1", "1").get();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+
+ logger.info("--> verifying explain with id [2], with routing [0], should succeed");
+ ExplainResponse explainResponse = client().prepareExplain("test", "type1", "2")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setRouting("0").get();
+ assertThat(explainResponse.isExists(), equalTo(true));
+ assertThat(explainResponse.isMatch(), equalTo(true));
+
+ logger.info("--> verifying explain with id [2], with no routing, should fail");
+ try {
+ client().prepareExplain("test", "type1", "2")
+ .setQuery(QueryBuilders.matchAllQuery()).get();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[2]"));
+ }
+
+ logger.info("--> verifying term vector with id [1], with routing [0], should succeed");
+ TermVectorResponse termVectorResponse = client().prepareTermVector("test", "type1", "1").setRouting("0").get();
+ assertThat(termVectorResponse.isExists(), equalTo(true));
+ assertThat(termVectorResponse.getId(), equalTo("1"));
+
+ try {
+ client().prepareTermVector("test", "type1", "1").get();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1").setRouting("0")
+ .setDoc("field1", "value1").get();
+ assertThat(updateResponse.getId(), equalTo("1"));
+ assertThat(updateResponse.getVersion(), equalTo(2l));
+
+ try {
+ client().prepareUpdate("test", "type1", "1").setDoc("field1", "value1").get();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+
+ logger.info("--> verifying mget with ids [1,2], with routing [0], should succeed");
+ MultiGetResponse multiGetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "1").routing("0"))
+ .add(new MultiGetRequest.Item("test", "type1", "2").routing("0")).get();
+ assertThat(multiGetResponse.getResponses().length, equalTo(2));
+ assertThat(multiGetResponse.getResponses()[0].isFailed(), equalTo(false));
+ assertThat(multiGetResponse.getResponses()[0].getResponse().getId(), equalTo("1"));
+ assertThat(multiGetResponse.getResponses()[1].isFailed(), equalTo(false));
+ assertThat(multiGetResponse.getResponses()[1].getResponse().getId(), equalTo("2"));
+
+ logger.info("--> verifying mget with ids [1,2], with no routing, should fail");
+ multiGetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "type1", "1"))
+ .add(new MultiGetRequest.Item("test", "type1", "2")).get();
+ assertThat(multiGetResponse.getResponses().length, equalTo(2));
+ assertThat(multiGetResponse.getResponses()[0].isFailed(), equalTo(true));
+ assertThat(multiGetResponse.getResponses()[0].getFailure().getId(), equalTo("1"));
+ assertThat(multiGetResponse.getResponses()[0].getFailure().getMessage(), equalTo("routing is required, but hasn't been specified"));
+ assertThat(multiGetResponse.getResponses()[1].isFailed(), equalTo(true));
+ assertThat(multiGetResponse.getResponses()[1].getFailure().getId(), equalTo("2"));
+ assertThat(multiGetResponse.getResponses()[1].getFailure().getMessage(), equalTo("routing is required, but hasn't been specified"));
+
+ MultiTermVectorsResponse multiTermVectorsResponse = client().prepareMultiTermVectors()
+ .add(new TermVectorRequest("test", "type1", "1").routing("0"))
+ .add(new TermVectorRequest("test", "type1", "2").routing("0")).get();
+ assertThat(multiTermVectorsResponse.getResponses().length, equalTo(2));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getId(), equalTo("1"));
+ assertThat(multiTermVectorsResponse.getResponses()[0].isFailed(), equalTo(false));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getResponse().getId(), equalTo("1"));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(multiTermVectorsResponse.getResponses()[1].getId(), equalTo("2"));
+ assertThat(multiTermVectorsResponse.getResponses()[1].isFailed(), equalTo(false));
+ assertThat(multiTermVectorsResponse.getResponses()[1].getResponse().getId(), equalTo("2"));
+ assertThat(multiTermVectorsResponse.getResponses()[1].getResponse().isExists(), equalTo(true));
+
+ multiTermVectorsResponse = client().prepareMultiTermVectors()
+ .add(new TermVectorRequest("test", "type1", "1"))
+ .add(new TermVectorRequest("test", "type1", "2")).get();
+ assertThat(multiTermVectorsResponse.getResponses().length, equalTo(2));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getId(), equalTo("1"));
+ assertThat(multiTermVectorsResponse.getResponses()[0].isFailed(), equalTo(true));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getFailure().getMessage(), equalTo("routing is required, but hasn't been specified"));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getResponse(), nullValue());
+ assertThat(multiTermVectorsResponse.getResponses()[1].getId(), equalTo("2"));
+ assertThat(multiTermVectorsResponse.getResponses()[1].isFailed(), equalTo(true));
+ assertThat(multiTermVectorsResponse.getResponses()[1].getResponse(), nullValue());
+ assertThat(multiTermVectorsResponse.getResponses()[1].getFailure().getMessage(), equalTo("routing is required, but hasn't been specified"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/script/IndexLookupTests.java b/src/test/java/org/elasticsearch/script/IndexLookupTests.java
new file mode 100644
index 0000000..5d1aa6b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/script/IndexLookupTests.java
@@ -0,0 +1,625 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class IndexLookupTests extends ElasticsearchIntegrationTest {
+
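+ // These flag strings are passed into the lookup scripts below; judging from the tests in
+ // this class, _FREQUENCIES/_POSITIONS/_OFFSETS/_PAYLOADS select which per-term data the
+ // _index lookup retrieves, while _CACHE ("record") additionally caches the positions so
+ // they can be iterated more than once (see checkExceptions()).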
+ String includeAllFlag = "_FREQUENCIES | _OFFSETS | _PAYLOADS | _POSITIONS | _CACHE";
+ String includeAllWithoutRecordFlag = "_FREQUENCIES | _OFFSETS | _PAYLOADS | _POSITIONS ";
+ private HashMap<String, List<Object>> expectedEndOffsetsArray;
+ private HashMap<String, List<Object>> expectedPayloadsArray;
+ private HashMap<String, List<Object>> expectedPositionsArray;
+ private HashMap<String, List<Object>> emptyArray;
+ private HashMap<String, List<Object>> expectedStartOffsetsArray;
+
+ void initTestData() throws InterruptedException, ExecutionException, IOException {
+ emptyArray = new HashMap<String, List<Object>>();
+ List<Object> empty1 = new ArrayList<Object>();
+ empty1.add(-1);
+ empty1.add(-1);
+ emptyArray.put("1", empty1);
+ List<Object> empty2 = new ArrayList<Object>();
+ empty2.add(-1);
+ empty2.add(-1);
+ emptyArray.put("2", empty2);
+ List<Object> empty3 = new ArrayList<Object>();
+ empty3.add(-1);
+ empty3.add(-1);
+ emptyArray.put("3", empty3);
+
+ expectedPositionsArray = new HashMap<String, List<Object>>();
+
+ List<Object> pos1 = new ArrayList<Object>();
+ pos1.add(1);
+ pos1.add(2);
+ expectedPositionsArray.put("1", pos1);
+ List<Object> pos2 = new ArrayList<Object>();
+ pos2.add(0);
+ pos2.add(1);
+ expectedPositionsArray.put("2", pos2);
+ List<Object> pos3 = new ArrayList<Object>();
+ pos3.add(0);
+ pos3.add(4);
+ expectedPositionsArray.put("3", pos3);
+
+ expectedPayloadsArray = new HashMap<String, List<Object>>();
+ List<Object> pay1 = new ArrayList<Object>();
+ pay1.add(2);
+ pay1.add(3);
+ expectedPayloadsArray.put("1", pay1);
+ List<Object> pay2 = new ArrayList<Object>();
+ pay2.add(1);
+ pay2.add(2);
+ expectedPayloadsArray.put("2", pay2);
+ List<Object> pay3 = new ArrayList<Object>();
+ pay3.add(1);
+ pay3.add(-1);
+ expectedPayloadsArray.put("3", pay3);
+ /*
+ * "a|1 b|2 b|3 c|4 d " "b|1 b|2 c|3 d|4 a " "b|1 c|2 d|3 a|4 b "
+ */
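+ // For example, in doc 1 ("a|1 b|2 b|3 c|4 d ") the term 'b' appears as the whitespace
+ // tokens "b|2" (chars 4-7) and "b|3" (chars 8-11), which is where the expected start
+ // offsets [4, 8] and end offsets [7, 11] below come from.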
+ expectedStartOffsetsArray = new HashMap<String, List<Object>>();
+ List<Object> starts1 = new ArrayList<Object>();
+ starts1.add(4);
+ starts1.add(8);
+ expectedStartOffsetsArray.put("1", starts1);
+ List<Object> starts2 = new ArrayList<Object>();
+ starts2.add(0);
+ starts2.add(4);
+ expectedStartOffsetsArray.put("2", starts2);
+ List<Object> starts3 = new ArrayList<Object>();
+ starts3.add(0);
+ starts3.add(16);
+ expectedStartOffsetsArray.put("3", starts3);
+
+ expectedEndOffsetsArray = new HashMap<String, List<Object>>();
+ List<Object> ends1 = new ArrayList<Object>();
+ ends1.add(7);
+ ends1.add(11);
+ expectedEndOffsetsArray.put("1", ends1);
+ List<Object> ends2 = new ArrayList<Object>();
+ ends2.add(3);
+ ends2.add(7);
+ expectedEndOffsetsArray.put("2", ends2);
+ List<Object> ends3 = new ArrayList<Object>();
+ ends3.add(3);
+ ends3.add(17);
+ expectedEndOffsetsArray.put("3", ends3);
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("int_payload_field").field("type", "string").field("index_options", "offsets")
+ .field("analyzer", "payload_int").endObject().endObject().endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
+ ImmutableSettings.settingsBuilder().put("index.analysis.analyzer.payload_int.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_int.filter", "delimited_int")
+ .put("index.analysis.filter.delimited_int.delimiter", "|")
+ .put("index.analysis.filter.delimited_int.encoding", "int")
+ .put("index.analysis.filter.delimited_int.type", "delimited_payload_filter")
+ .put("index.number_of_replicas", 0).put("index.number_of_shards", randomIntBetween(1, 6))));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("int_payload_field", "a|1 b|2 b|3 c|4 d "), client()
+ .prepareIndex("test", "type1", "2").setSource("int_payload_field", "b|1 b|2 c|3 d|4 a "),
+ client().prepareIndex("test", "type1", "3").setSource("int_payload_field", "b|1 c|2 d|3 a|4 b "));
+ ensureGreen();
+
+ }
+
+ @Test
+ public void testTwoScripts() throws Exception {
+
+ initTestData();
+
+ // the script field checks term frequencies for 'c', the score script for 'b'
+ String scriptFieldScript = "term = _index['int_payload_field']['c']; term.tf()";
+ String scoreScript = "term = _index['int_payload_field']['b']; term.tf()";
+ Map<String, Object> expectedResultsField = new HashMap<String, Object>();
+ expectedResultsField.put("1", 1);
+ expectedResultsField.put("2", 1);
+ expectedResultsField.put("3", 1);
+ Map<String, Object> expectedResultsScore = new HashMap<String, Object>();
+ expectedResultsScore.put("1", 2f);
+ expectedResultsScore.put("2", 2f);
+ expectedResultsScore.put("3", 2f);
+ checkOnlyFunctionScore(scoreScript, expectedResultsScore, 3);
+ checkValueInEachDocWithFunctionScore(scriptFieldScript, expectedResultsField, scoreScript, expectedResultsScore, 3);
+
+ }
+
+ @Test
+ public void testCallWithDifferentFlagsFails() throws Exception {
+
+ initTestData();
+
+ // should throw an exception, we cannot call with different flags twice
+ // if the flags of the second call were not included in the first call.
+ String script = "term = _index['int_payload_field']['b']; return _index['int_payload_field'].get('b', _POSITIONS).tf();";
+ try {
+ client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script).execute().actionGet();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(
+ e.getDetailedMessage()
+ .indexOf(
+ "You must call get with all required flags! Instead of _index['int_payload_field'].get('b', _FREQUENCIES) and _index['int_payload_field'].get('b', _POSITIONS) call _index['int_payload_field'].get('b', _FREQUENCIES | _POSITIONS) once]; "),
+ Matchers.greaterThan(-1));
+ }
+
+ // Should not throw an exception this way round
+ script = "term = _index['int_payload_field'].get('b', _POSITIONS | _FREQUENCIES);return _index['int_payload_field']['b'].tf();";
+ client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script).execute().actionGet();
+ }
+
+ private void checkOnlyFunctionScore(String scoreScript, Map<String, Object> expectedScore, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test")
+ .setQuery(QueryBuilders.functionScoreQuery(ScoreFunctionBuilders.scriptFunction(scoreScript))).execute().actionGet();
+ ElasticsearchAssertions.assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ assertThat("for doc " + hit.getId(), ((Float) expectedScore.get(hit.getId())).doubleValue(),
+ Matchers.closeTo(hit.score(), 1.e-4));
+ }
+ }
+
+ @Test
+ public void testDocumentationExample() throws Exception {
+
+ initTestData();
+
+ String script = "term = _index['float_payload_field'].get('b'," + includeAllFlag
+ + "); payloadSum=0; for (pos : term) {payloadSum = pos.payloadAsInt(0);} return payloadSum;";
+
+ // 'float_payload_field' does not exist in this index, so the sum should be 0
+ HashMap<String, Object> expectedSums = new HashMap<String, Object>();
+ expectedSums.put("1", 0);
+ expectedSums.put("2", 0);
+ expectedSums.put("3", 0);
+ checkValueInEachDoc(script, expectedSums, 3);
+
+ script = "term = _index['int_payload_field'].get('b'," + includeAllFlag
+ + "); payloadSum=0; for (pos : term) {payloadSum = payloadSum + pos.payloadAsInt(0);} return payloadSum;";
+
+ // existing field: the sums should be as follows
+ expectedSums.put("1", 5);
+ expectedSums.put("2", 3);
+ expectedSums.put("3", 1);
+ checkValueInEachDoc(script, expectedSums, 3);
+ }
+
+ @Test
+ public void testIteratorAndRecording() throws Exception {
+
+ initTestData();
+
+ // call twice with record: should work as expected
+ String script = createPositionsArrayScriptIterateTwice("b", includeAllFlag, "position");
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllFlag, "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllFlag, "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllFlag, "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ // no record and get iterator twice: should fail
+ script = createPositionsArrayScriptIterateTwice("b", includeAllWithoutRecordFlag, "position");
+ checkExceptions(script);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllWithoutRecordFlag, "startOffset");
+ checkExceptions(script);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllWithoutRecordFlag, "endOffset");
+ checkExceptions(script);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllWithoutRecordFlag, "payloadAsInt(-1)");
+ checkExceptions(script);
+
+ // no record and get termObject twice and iterate: should fail
+ script = createPositionsArrayScriptGetInfoObjectTwice("b", includeAllWithoutRecordFlag, "position");
+ checkExceptions(script);
+ script = createPositionsArrayScriptGetInfoObjectTwice("b", includeAllWithoutRecordFlag, "startOffset");
+ checkExceptions(script);
+ script = createPositionsArrayScriptGetInfoObjectTwice("b", includeAllWithoutRecordFlag, "endOffset");
+ checkExceptions(script);
+ script = createPositionsArrayScriptGetInfoObjectTwice("b", includeAllWithoutRecordFlag, "payloadAsInt(-1)");
+ checkExceptions(script);
+
+ }
+
+ private String createPositionsArrayScriptGetInfoObjectTwice(String term, String flags, String what) {
+ String script = "term = _index['int_payload_field'].get('" + term + "'," + flags
+ + "); array=[]; for (pos : term) {array.add(pos." + what + ")} ;_index['int_payload_field'].get('" + term + "',"
+ + flags + "); array=[]; for (pos : term) {array.add(pos." + what + ")}";
+ return script;
+ }
+
+ private String createPositionsArrayScriptIterateTwice(String term, String flags, String what) {
+ String script = "term = _index['int_payload_field'].get('" + term + "'," + flags
+ + "); array=[]; for (pos : term) {array.add(pos." + what + ")} array=[]; for (pos : term) {array.add(pos." + what
+ + ")} return array;";
+ return script;
+ }
+
+ private String createPositionsArrayScript(String field, String term, String flags, String what) {
+ String script = "term = _index['" + field + "'].get('" + term + "'," + flags
+ + "); array=[]; for (pos : term) {array.add(pos." + what + ")} return array;";
+ return script;
+ }
+
+ private String createPositionsArrayScriptDefaultGet(String field, String term, String what) {
+ String script = "term = _index['" + field + "']['" + term + "']; array=[]; for (pos : term) {array.add(pos." + what
+ + ")} return array;";
+ return script;
+ }
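+ // As an illustration, createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "position")
+ // produces the script:
+ // term = _index['int_payload_field'].get('b',_POSITIONS); array=[]; for (pos : term) {array.add(pos.position)} return array;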
+
+ @Test
+ public void testFlags() throws Exception {
+
+ initTestData();
+
+ // check default flag
+ String script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "position");
+ // there should be no positions
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "startOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "endOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "payloadAsInt(-1)");
+ // there should be no payload
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+
+ // check FLAG_FREQUENCIES flag
+ script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "position");
+ // there should be no positions
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "startOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "endOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "payloadAsInt(-1)");
+ // there should be no payloads
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+
+ // check FLAG_POSITIONS flag
+ script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "position");
+ // there should be positions
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "startOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "endOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "payloadAsInt(-1)");
+ // there should be no payloads
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+
+ // check FLAG_OFFSETS flag
+ script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "position");
+ // there should be positions and so forth ...
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ // check FLAG_PAYLOADS flag
+ script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "position");
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ // check all flags
+ String allFlags = "_POSITIONS | _OFFSETS | _PAYLOADS";
+ script = createPositionsArrayScript("int_payload_field", "b", allFlags, "position");
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", allFlags, "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", allFlags, "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", allFlags, "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ // check all flags without record
+ script = createPositionsArrayScript("int_payload_field", "b", includeAllWithoutRecordFlag, "position");
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", includeAllWithoutRecordFlag, "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", includeAllWithoutRecordFlag, "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", includeAllWithoutRecordFlag, "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ }
+
+ private void checkArrayValsInEachDoc(String script, HashMap<String, List<Object>> expectedArray, int expectedHitSize) {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ ElasticsearchAssertions.assertHitCount(sr, expectedHitSize);
+ int nonNullCounter = 0;
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues().get(0);
+ Object expectedResult = expectedArray.get(hit.getId());
+ assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
+ if (expectedResult != null) {
+ nonNullCounter++;
+ }
+ }
+ assertThat(nonNullCounter, equalTo(expectedArray.size()));
+ }
+
+ @Test
+ public void testAllExceptPosAndOffset() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("float_payload_field").field("type", "string").field("index_options", "offsets").field("term_vector", "no")
+ .field("analyzer", "payload_float").endObject().startObject("string_payload_field").field("type", "string")
+ .field("index_options", "offsets").field("term_vector", "no").field("analyzer", "payload_string").endObject()
+ .startObject("int_payload_field").field("type", "string").field("index_options", "offsets")
+ .field("analyzer", "payload_int").endObject().endObject().endObject().endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
+ ImmutableSettings.settingsBuilder().put("index.analysis.analyzer.payload_float.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_float.filter", "delimited_float")
+ .put("index.analysis.filter.delimited_float.delimiter", "|")
+ .put("index.analysis.filter.delimited_float.encoding", "float")
+ .put("index.analysis.filter.delimited_float.type", "delimited_payload_filter")
+ .put("index.analysis.analyzer.payload_string.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_string.filter", "delimited_string")
+ .put("index.analysis.filter.delimited_string.delimiter", "|")
+ .put("index.analysis.filter.delimited_string.encoding", "identity")
+ .put("index.analysis.filter.delimited_string.type", "delimited_payload_filter")
+ .put("index.analysis.analyzer.payload_int.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_int.filter", "delimited_int")
+ .put("index.analysis.filter.delimited_int.delimiter", "|")
+ .put("index.analysis.filter.delimited_int.encoding", "int")
+ .put("index.analysis.filter.delimited_int.type", "delimited_payload_filter").put("index.number_of_replicas", 0)
+ .put("index.number_of_shards", 1)));
+ ensureYellow();
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("float_payload_field", "a|1 b|2 a|3 b "), client()
+ .prepareIndex("test", "type1", "2").setSource("string_payload_field", "a|a b|b a|a b "),
+ client().prepareIndex("test", "type1", "3").setSource("float_payload_field", "a|4 b|5 a|6 b "),
+ client().prepareIndex("test", "type1", "4").setSource("string_payload_field", "a|b b|a a|b b "),
+ client().prepareIndex("test", "type1", "5").setSource("float_payload_field", "c "),
+ client().prepareIndex("test", "type1", "6").setSource("int_payload_field", "c|1"));
+
+ // get the number of all docs
+ String script = "_index.numDocs()";
+ checkValueInEachDoc(6, script, 6);
+
+ // get the number of docs with field float_payload_field
+ script = "_index['float_payload_field'].docCount()";
+ checkValueInEachDoc(3, script, 6);
+
+ // corner case: what if the field does not exist?
+ script = "_index['non_existent_field'].docCount()";
+ checkValueInEachDoc(0, script, 6);
+
+ // get the number of all tokens in all docs
+ script = "_index['float_payload_field'].sumttf()";
+ checkValueInEachDoc(9, script, 6);
+
+ // corner case: get the number of all tokens in all docs for a non-existent field
+ script = "_index['non_existent_field'].sumttf()";
+ checkValueInEachDoc(0, script, 6);
+
+ // get the sum of doc freqs in all docs
+ script = "_index['float_payload_field'].sumdf()";
+ checkValueInEachDoc(5, script, 6);
+
+ // get the sum of doc freqs in all docs for non existent field
+ script = "_index['non_existent_field'].sumdf()";
+ checkValueInEachDoc(0, script, 6);
+
+ // check term frequencies for 'a'
+ script = "term = _index['float_payload_field']['a']; if (term != null) {term.tf()}";
+ Map<String, Object> expectedResults = new HashMap<String, Object>();
+ expectedResults.put("1", 2);
+ expectedResults.put("2", 0);
+ expectedResults.put("3", 2);
+ expectedResults.put("4", 0);
+ expectedResults.put("5", 0);
+ expectedResults.put("6", 0);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check doc frequencies for 'c'
+ script = "term = _index['float_payload_field']['c']; if (term != null) {term.df()}";
+ expectedResults.put("1", 1l);
+ expectedResults.put("2", 1l);
+ expectedResults.put("3", 1l);
+ expectedResults.put("4", 1l);
+ expectedResults.put("5", 1l);
+ expectedResults.put("6", 1l);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check doc frequencies for a term that does not exist
+ script = "term = _index['float_payload_field']['non_existent_term']; if (term != null) {term.df()}";
+ expectedResults.put("1", 0l);
+ expectedResults.put("2", 0l);
+ expectedResults.put("3", 0l);
+ expectedResults.put("4", 0l);
+ expectedResults.put("5", 0l);
+ expectedResults.put("6", 0l);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check term frequencies for a field that does not exist
+ script = "term = _index['non_existent_field']['non_existent_term']; if (term != null) {term.tf()}";
+ expectedResults.put("1", 0);
+ expectedResults.put("2", 0);
+ expectedResults.put("3", 0);
+ expectedResults.put("4", 0);
+ expectedResults.put("5", 0);
+ expectedResults.put("6", 0);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check total term frequencies for 'a'
+ script = "term = _index['float_payload_field']['a']; if (term != null) {term.ttf()}";
+ expectedResults.put("1", 4l);
+ expectedResults.put("2", 4l);
+ expectedResults.put("3", 4l);
+ expectedResults.put("4", 4l);
+ expectedResults.put("5", 4l);
+ expectedResults.put("6", 4l);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check float payload for 'b'
+ HashMap<String, List<Object>> expectedPayloadsArray = new HashMap<String, List<Object>>();
+ script = createPositionsArrayScript("float_payload_field", "b", includeAllFlag, "payloadAsFloat(-1)");
+ float missingValue = -1;
+ List<Object> payloadsFor1 = new ArrayList<Object>();
+ payloadsFor1.add(2f);
+ payloadsFor1.add(missingValue);
+ expectedPayloadsArray.put("1", payloadsFor1);
+ List<Object> payloadsFor2 = new ArrayList<Object>();
+ payloadsFor2.add(5f);
+ payloadsFor2.add(missingValue);
+ expectedPayloadsArray.put("3", payloadsFor2);
+ expectedPayloadsArray.put("6", new ArrayList<Object>());
+ expectedPayloadsArray.put("5", new ArrayList<Object>());
+ expectedPayloadsArray.put("4", new ArrayList<Object>());
+ expectedPayloadsArray.put("2", new ArrayList<Object>());
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 6);
+
+ // check string payload for 'b'
+ expectedPayloadsArray.clear();
+ payloadsFor1.clear();
+ payloadsFor2.clear();
+ script = createPositionsArrayScript("string_payload_field", "b", includeAllFlag, "payloadAsString()");
+ payloadsFor1.add("b");
+ payloadsFor1.add(null);
+ expectedPayloadsArray.put("2", payloadsFor1);
+ payloadsFor2.add("a");
+ payloadsFor2.add(null);
+ expectedPayloadsArray.put("4", payloadsFor2);
+ expectedPayloadsArray.put("6", new ArrayList<Object>());
+ expectedPayloadsArray.put("5", new ArrayList<Object>());
+ expectedPayloadsArray.put("3", new ArrayList<Object>());
+ expectedPayloadsArray.put("1", new ArrayList<Object>());
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 6);
+
+ // check int payload for 'c'
+ expectedPayloadsArray.clear();
+ payloadsFor1.clear();
+ payloadsFor2.clear();
+ script = createPositionsArrayScript("int_payload_field", "c", includeAllFlag, "payloadAsInt(-1)");
+ payloadsFor1 = new ArrayList<Object>();
+ payloadsFor1.add(1);
+ expectedPayloadsArray.put("6", payloadsFor1);
+ expectedPayloadsArray.put("5", new ArrayList<Object>());
+ expectedPayloadsArray.put("4", new ArrayList<Object>());
+ expectedPayloadsArray.put("3", new ArrayList<Object>());
+ expectedPayloadsArray.put("2", new ArrayList<Object>());
+ expectedPayloadsArray.put("1", new ArrayList<Object>());
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 6);
+
+ }
+
+ private void checkExceptions(String script) {
+ try {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ assertThat(sr.getHits().hits().length, equalTo(0));
+ ShardSearchFailure[] shardFails = sr.getShardFailures();
+ for (ShardSearchFailure fail : shardFails) {
+ assertThat(fail.reason().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitely."),
+ Matchers.greaterThan(-1));
+ }
+ } catch (SearchPhaseExecutionException ex) {
+
+ assertThat(
+ ex.getDetailedMessage().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitely."),
+ Matchers.greaterThan(-1));
+ }
+ }
+
+ private void checkValueInEachDocWithFunctionScore(String fieldScript, Map<String, Object> expectedFieldVals, String scoreScript,
+ Map<String, Object> expectedScore, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test")
+ .setQuery(QueryBuilders.functionScoreQuery(ScoreFunctionBuilders.scriptFunction(scoreScript)))
+ .addScriptField("tvtest", fieldScript).execute().actionGet();
+ ElasticsearchAssertions.assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues().get(0);
+ Object expectedResult = expectedFieldVals.get(hit.getId());
+ assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
+ assertThat("for doc " + hit.getId(), ((Float) expectedScore.get(hit.getId())).doubleValue(),
+ Matchers.closeTo(hit.score(), 1.e-4));
+ }
+ }
+
+ private void checkValueInEachDoc(String script, Map<String, Object> expectedResults, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ ElasticsearchAssertions.assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues().get(0);
+ Object expectedResult = expectedResults.get(hit.getId());
+ assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
+ }
+ }
+
+ private void checkValueInEachDoc(int value, String script, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ ElasticsearchAssertions.assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues().get(0);
+ if (result instanceof Integer) {
+ assertThat(((Integer) result).intValue(), equalTo(value));
+ } else if (result instanceof Long) {
+ assertThat(((Long) result).intValue(), equalTo(value));
+ } else {
+ fail();
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/src/test/java/org/elasticsearch/script/NativeScriptTests.java
new file mode 100644
index 0000000..7c66752
--- /dev/null
+++ b/src/test/java/org/elasticsearch/script/NativeScriptTests.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+public class NativeScriptTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNativeScript() {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("script.native.my.type", MyNativeScriptFactory.class.getName())
+ .build();
+ Injector injector = new ModulesBuilder().add(
+ new SettingsModule(settings),
+ new ScriptModule(settings)).createInjector();
+
+ ScriptService scriptService = injector.getInstance(ScriptService.class);
+
+ ExecutableScript executable = scriptService.executable("native", "my", null);
+ assertThat(executable.run().toString(), equalTo("test"));
+ }
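+ // Note: the settings key "script.native.my.type" maps the script name "my" to its
+ // NativeScriptFactory, which is how scriptService.executable("native", "my", null)
+ // above resolves to MyNativeScriptFactory.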
+
+ static class MyNativeScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new MyScript();
+ }
+ }
+
+ static class MyScript extends AbstractExecutableScript {
+ @Override
+ public Object run() {
+ return "test";
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/script/ScriptFieldTests.java b/src/test/java/org/elasticsearch/script/ScriptFieldTests.java
new file mode 100644
index 0000000..9615f96
--- /dev/null
+++ b/src/test/java/org/elasticsearch/script/ScriptFieldTests.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = Scope.SUITE, numNodes = 3)
+public class ScriptFieldTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder().put("plugin.types", CustomScriptPlugin.class.getName()).put(super.nodeSettings(nodeOrdinal)).build();
+ }
+
+ static int[] intArray = { Integer.MAX_VALUE, Integer.MIN_VALUE, 3 };
+ static long[] longArray = { Long.MAX_VALUE, Long.MIN_VALUE, 9223372036854775807l };
+ static float[] floatArray = { Float.MAX_VALUE, Float.MIN_VALUE, 3.3f };
+ static double[] doubleArray = { Double.MAX_VALUE, Double.MIN_VALUE, 3.3d };
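+ // these arrays are returned verbatim by the native scripts below; the test asserts that
+ // each comes back as a single script-field value rather than element by element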
+
+ public void testNativeScript() throws InterruptedException, ExecutionException {
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("text", "doc1"), client()
+ .prepareIndex("test", "type1", "2").setSource("text", "doc2"),
+ client().prepareIndex("test", "type1", "3").setSource("text", "doc3"), client().prepareIndex("test", "type1", "4")
+ .setSource("text", "doc4"), client().prepareIndex("test", "type1", "5").setSource("text", "doc5"), client()
+ .prepareIndex("test", "type1", "6").setSource("text", "doc6"));
+
+ client().admin().indices().prepareFlush("test").execute().actionGet();
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .addScriptField("int", "native", "int", null).addScriptField("float", "native", "float", null)
+ .addScriptField("double", "native", "double", null).addScriptField("long", "native", "long", null).execute().actionGet();
+ assertThat(sr.getHits().hits().length, equalTo(6));
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("int").getValues().get(0);
+ assertThat(result, equalTo((Object) intArray));
+ result = hit.getFields().get("long").getValues().get(0);
+ assertThat(result, equalTo((Object) longArray));
+ result = hit.getFields().get("float").getValues().get(0);
+ assertThat(result, equalTo((Object) floatArray));
+ result = hit.getFields().get("double").getValues().get(0);
+ assertThat(result, equalTo((Object) doubleArray));
+ }
+ }
+
+ static class IntArrayScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new IntScript();
+ }
+ }
+
+ static class IntScript extends AbstractSearchScript {
+ @Override
+ public Object run() {
+ return intArray;
+ }
+ }
+
+ static class LongArrayScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new LongScript();
+ }
+ }
+
+ static class LongScript extends AbstractSearchScript {
+ @Override
+ public Object run() {
+ return longArray;
+ }
+ }
+
+ static class FloatArrayScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new FloatScript();
+ }
+ }
+
+ static class FloatScript extends AbstractSearchScript {
+ @Override
+ public Object run() {
+ return floatArray;
+ }
+ }
+
+ static class DoubleArrayScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new DoubleScript();
+ }
+ }
+
+ static class DoubleScript extends AbstractSearchScript {
+ @Override
+ public Object run() {
+ return doubleArray;
+ }
+ }
+
+ public static class CustomScriptPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "custom_script";
+ }
+
+ @Override
+ public String description() {
+ return "script ";
+ }
+
+ public void onModule(ScriptModule scriptModule) {
+ scriptModule.registerScript("int", IntArrayScriptFactory.class);
+ scriptModule.registerScript("long", LongArrayScriptFactory.class);
+ scriptModule.registerScript("float", FloatArrayScriptFactory.class);
+ scriptModule.registerScript("double", DoubleArrayScriptFactory.class);
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/StressSearchServiceReaperTest.java b/src/test/java/org/elasticsearch/search/StressSearchServiceReaperTest.java
new file mode 100644
index 0000000..aee1577
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/StressSearchServiceReaperTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search;
+
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+/**
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+public class StressSearchServiceReaperTest extends ElasticsearchIntegrationTest {
+
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ // very frequent checks
+ return ImmutableSettings.builder().put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueMillis(1)).build();
+ }
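+ // with the keep-alive interval set to 1ms above, the reaper that frees expired search
+ // contexts runs near-continuously, maximizing the chance of racing against the searches
+ // issued in testStressReaper (the scenario from issue #5165)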
+
+ @Slow
+ @Test // see issue #5165 - this test fails each time without the fix in pull #5170
+ public void testStressReaper() throws ExecutionException, InterruptedException {
+ int num = atLeast(100);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[num];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "type", "" + i).setSource("f", English.intToEnglish(i));
+ }
+ prepareCreate("test").setSettings("number_of_shards", randomIntBetween(1,5), "number_of_replicas", randomIntBetween(0,1)).setSettings();
+ indexRandom(true, builders);
+ ensureYellow();
+ final int iterations = atLeast(500);
+ for (int i = 0; i < iterations; i++) {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).setSize(num).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, num);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/CombiTests.java b/src/test/java/org/elasticsearch/search/aggregations/CombiTests.java
new file mode 100644
index 0000000..9633b85
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/CombiTests.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import com.carrotsearch.hppc.IntIntMap;
+import com.carrotsearch.hppc.IntIntOpenHashMap;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.missing.Missing;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Collection;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ * Tests that combine several aggregations in a single search request.
+ */
+public class CombiTests extends ElasticsearchIntegrationTest {
+
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ /**
+ * Makes sure that multiple aggregations working on the same field, yet requiring different
+ * value source types, can all work together. This used to fail because we cached the
+ * ValueSource by field name alone: if the cached value source was of type "bytes" and another
+ * aggregation needed to see the same field as "numeric", it broke. Value sources are now
+ * cached by a composite key (field name + ValueSource type), so there is no conflict.
+ * A minimal sketch of such a composite key follows this test.
+ */
+ @Test
+ public void multipleAggs_OnSameField_WithDifferentRequiredValueSourceType() throws Exception {
+
+ createIndex("idx");
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)];
+ IntIntMap values = new IntIntOpenHashMap();
+ long missingValues = 0;
+ for (int i = 0; i < builders.length; i++) {
+ String name = "name_" + randomIntBetween(1, 10);
+ if (rarely()) {
+ missingValues++;
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("name", name)
+ .endObject());
+ } else {
+ int value = randomIntBetween(1, 10);
+ values.put(value, values.getOrDefault(value, 0) + 1);
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("name", name)
+ .field("value", value)
+ .endObject());
+ }
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+
+
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(missing("missing_values").field("value"))
+ .addAggregation(terms("values").field("value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Aggregations aggs = response.getAggregations();
+
+ Missing missing = aggs.get("missing_values");
+ assertNotNull(missing);
+ assertThat(missing.getDocCount(), equalTo(missingValues));
+
+ Terms terms = aggs.get("values");
+ assertNotNull(terms);
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(values.size()));
+ for (Terms.Bucket bucket : buckets) {
+ values.remove(bucket.getKeyAsNumber().intValue());
+ }
+ assertTrue(values.isEmpty());
+ }
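+
+ /**
+ * Minimal sketch of the composite cache key described above. The names here are hypothetical
+ * and do not mirror the actual ValuesSource cache internals; the point is only that equality
+ * covers both the field name and the required value source type.
+ */
+ static final class FieldAndTypeKey {
+ final String fieldName;
+ final Class<?> valuesSourceType;
+
+ FieldAndTypeKey(String fieldName, Class<?> valuesSourceType) {
+ this.fieldName = fieldName;
+ this.valuesSourceType = valuesSourceType;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof FieldAndTypeKey)) {
+ return false;
+ }
+ FieldAndTypeKey other = (FieldAndTypeKey) o;
+ return fieldName.equals(other.fieldName) && valuesSourceType.equals(other.valuesSourceType);
+ }
+
+ @Override
+ public int hashCode() {
+ return 31 * fieldName.hashCode() + valuesSourceType.hashCode();
+ }
+ }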
+
+
+ /**
+ * Some top-level aggs (e.g. date_histogram/histogram) that are executed on unmapped fields
+ * will generate an estimated bucket count of zero. When the sub-aggregator is then created,
+ * it takes this estimate into account. This used to cause an ArrayIndexOutOfBoundsException.
+ * A defensive growth sketch follows this test.
+ */
+ @Test
+ public void subAggregationForTopAggregationOnUnmappedField() throws Exception {
+
+ prepareCreate("idx").addMapping("type", jsonBuilder()
+ .startObject()
+ .startObject("type").startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .startObject("value").field("type", "integer").endObject()
+ .endObject().endObject()
+ .endObject()).execute().actionGet();
+
+ ensureSearchable("idx");
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .addAggregation(histogram("values").field("value1").interval(1)
+ .subAggregation(terms("names").field("name")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), Matchers.equalTo(0l));
+ Histogram values = searchResponse.getAggregations().get("values");
+ assertThat(values, notNullValue());
+ assertThat(values.getBuckets().isEmpty(), is(true));
+ }
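+
+ // Minimal sketch (hypothetical helper, not the actual aggregator code) of the defensive
+ // pattern that avoids the old failure: grow per-bucket storage on demand instead of
+ // trusting the initial bucket-count estimate, which may be zero for unmapped fields.
+ static long[] ensureBucketCapacity(long[] counts, int bucket) {
+ if (bucket >= counts.length) {
+ counts = java.util.Arrays.copyOf(counts, Math.max(bucket + 1, counts.length * 2));
+ }
+ return counts;
+ }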
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/ParsingTests.java b/src/test/java/org/elasticsearch/search/aggregations/ParsingTests.java
new file mode 100644
index 0000000..9ae230a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/ParsingTests.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+public class ParsingTests extends ElasticsearchIntegrationTest {
+
+ @Test(expected=SearchPhaseExecutionException.class)
+ public void testTwoTypes() throws Exception {
+ createIndex("idx");
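+ // "in_stock" illegally defines two aggregation types ("filter" and "terms") in a single
+ // aggregation object, so parsing the request must fail with SearchPhaseExecutionException.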
+ client().prepareSearch("idx").setAggregations(JsonXContent.contentBuilder()
+ .startObject()
+ .startObject("in_stock")
+ .startObject("filter")
+ .startObject("range")
+ .startObject("stock")
+ .field("gt", 0)
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("terms")
+ .field("field", "stock")
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/RandomTests.java b/src/test/java/org/elasticsearch/search/aggregations/RandomTests.java
new file mode 100644
index 0000000..811a88a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/RandomTests.java
@@ -0,0 +1,276 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import com.carrotsearch.hppc.IntOpenHashSet;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.RangeFilterBuilder;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.elasticsearch.search.aggregations.bucket.range.RangeBuilder;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ * Additional tests that exercise more complex aggregation trees on larger random datasets,
+ * so that things like the growth of dynamic arrays are covered.
+ */
+public class RandomTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+
+
+ // Make sure that unordered, reversed, disjoint and/or overlapping ranges are supported
+ // Duel with filters
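+ // "Duel" means computing the same counts in two independent ways (here a range aggregation
+ // versus one equivalent filter aggregation per range) and asserting that they agree.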
+ public void testRandomRanges() throws Exception {
+ final int numDocs = atLeast(1000);
+ final double[][] docs = new double[numDocs][];
+ for (int i = 0; i < numDocs; ++i) {
+ final int numValues = randomInt(5);
+ docs[i] = new double[numValues];
+ for (int j = 0; j < numValues; ++j) {
+ docs[i][j] = randomDouble() * 100;
+ }
+ }
+
+ createIndex("idx");
+ for (int i = 0; i < docs.length; ++i) {
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .startArray("values");
+ for (int j = 0; j < docs[i].length; ++j) {
+ source = source.value(docs[i][j]);
+ }
+ source = source.endArray().endObject();
+ client().prepareIndex("idx", "type").setSource(source).execute().actionGet();
+ }
+ assertNoFailures(client().admin().indices().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenient()).execute().get());
+
+ final int numRanges = randomIntBetween(1, 20);
+ final double[][] ranges = new double[numRanges][];
+ for (int i = 0; i < ranges.length; ++i) {
+ switch (randomInt(2)) {
+ case 0:
+ ranges[i] = new double[] { Double.NEGATIVE_INFINITY, randomInt(100) };
+ break;
+ case 1:
+ ranges[i] = new double[] { randomInt(100), Double.POSITIVE_INFINITY };
+ break;
+ case 2:
+ ranges[i] = new double[] { randomInt(100), randomInt(100) };
+ break;
+ default:
+ throw new AssertionError();
+ }
+ }
+
+ RangeBuilder query = range("range").field("values");
+ for (int i = 0; i < ranges.length; ++i) {
+ String key = Integer.toString(i);
+ if (ranges[i][0] == Double.NEGATIVE_INFINITY) {
+ query.addUnboundedTo(key, ranges[i][1]);
+ } else if (ranges[i][1] == Double.POSITIVE_INFINITY) {
+ query.addUnboundedFrom(key, ranges[i][0]);
+ } else {
+ query.addRange(key, ranges[i][0], ranges[i][1]);
+ }
+ }
+
+ SearchRequestBuilder reqBuilder = client().prepareSearch("idx").addAggregation(query);
+ for (int i = 0; i < ranges.length; ++i) {
+ RangeFilterBuilder filter = FilterBuilders.rangeFilter("values");
+ if (ranges[i][0] != Double.NEGATIVE_INFINITY) {
+ filter = filter.from(ranges[i][0]);
+ }
+ if (ranges[i][1] != Double.POSITIVE_INFINITY){
+ filter = filter.to(ranges[i][1]);
+ }
+ reqBuilder = reqBuilder.addAggregation(filter("filter" + i).filter(filter));
+ }
+
+ SearchResponse resp = reqBuilder.execute().actionGet();
+ Range range = resp.getAggregations().get("range");
+
+ for (int i = 0; i < ranges.length; ++i) {
+
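+ // A document counts at most once per range, even when several of its values match
+ // (hence the break below), matching the doc-count semantics of both aggregations.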
+ long count = 0;
+ for (double[] values : docs) {
+ for (double value : values) {
+ if (value >= ranges[i][0] && value < ranges[i][1]) {
+ ++count;
+ break;
+ }
+ }
+ }
+
+ final Range.Bucket bucket = range.getBucketByKey(Integer.toString(i));
+ assertEquals(bucket.getKey(), count, bucket.getDocCount());
+
+ final Filter filter = resp.getAggregations().get("filter" + i);
+ assertThat(filter.getDocCount(), equalTo(count));
+ }
+ }
+
+ // test long/double/string terms aggs with high number of buckets that require array growth
+ public void testDuelTerms() throws Exception {
+ // These high numbers of docs and terms are important to trigger page recycling
+ final int numDocs = atLeast(10000);
+ final int maxNumTerms = randomIntBetween(10, 100000);
+
+ final IntOpenHashSet valuesSet = new IntOpenHashSet();
+ cluster().wipeIndices("idx");
+ prepareCreate("idx").addMapping("type", jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("string_values")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .startObject("long_values")
+ .field("type", "long")
+ .endObject()
+ .startObject("double_values")
+ .field("type", "double")
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ for (int i = 0; i < numDocs; ++i) {
+ final int[] values = new int[randomInt(4)];
+ for (int j = 0; j < values.length; ++j) {
+ values[j] = randomInt(maxNumTerms - 1) - 1000;
+ valuesSet.add(values[j]);
+ }
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .field("num", randomDouble())
+ .startArray("long_values");
+ for (int j = 0; j < values.length; ++j) {
+ source = source.value(values[j]);
+ }
+ source = source.endArray().startArray("double_values");
+ for (int j = 0; j < values.length; ++j) {
+ source = source.value((double) values[j]);
+ }
+ source = source.endArray().startArray("string_values");
+ for (int j = 0; j < values.length; ++j) {
+ source = source.value(Integer.toString(values[j]));
+ }
+ source = source.endArray().endObject();
+ client().prepareIndex("idx", "type").setSource(source).execute().actionGet();
+ }
+ assertNoFailures(client().admin().indices().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenient()).execute().get());
+
+ SearchResponse resp = client().prepareSearch("idx")
+ .addAggregation(terms("long").field("long_values").size(maxNumTerms).subAggregation(min("min").field("num")))
+ .addAggregation(terms("double").field("double_values").size(maxNumTerms).subAggregation(max("max").field("num")))
+ .addAggregation(terms("string_map").field("string_values").executionHint(TermsAggregatorFactory.EXECUTION_HINT_VALUE_MAP).size(maxNumTerms).subAggregation(stats("stats").field("num")))
+ .addAggregation(terms("string_ordinals").field("string_values").executionHint(TermsAggregatorFactory.EXECUTION_HINT_VALUE_ORDINALS).size(maxNumTerms).subAggregation(extendedStats("stats").field("num"))).execute().actionGet();
+ assertEquals(0, resp.getFailedShards());
+
+ final Terms longTerms = resp.getAggregations().get("long");
+ final Terms doubleTerms = resp.getAggregations().get("double");
+ final Terms stringMapTerms = resp.getAggregations().get("string_map");
+ final Terms stringOrdinalsTerms = resp.getAggregations().get("string_ordinals");
+
+ assertEquals(valuesSet.size(), longTerms.getBuckets().size());
+ assertEquals(valuesSet.size(), doubleTerms.getBuckets().size());
+ assertEquals(valuesSet.size(), stringMapTerms.getBuckets().size());
+ assertEquals(valuesSet.size(), stringOrdinalsTerms.getBuckets().size());
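+ // Every long bucket must have double and string counterparts with identical doc counts,
+ // since all three fields were populated from the same random values.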
+ for (Terms.Bucket bucket : longTerms.getBuckets()) {
+ final Terms.Bucket doubleBucket = doubleTerms.getBucketByKey(Double.toString(Long.parseLong(bucket.getKeyAsText().string())));
+ final Terms.Bucket stringMapBucket = stringMapTerms.getBucketByKey(bucket.getKeyAsText().string());
+ final Terms.Bucket stringOrdinalsBucket = stringOrdinalsTerms.getBucketByKey(bucket.getKeyAsText().string());
+ assertNotNull(doubleBucket);
+ assertNotNull(stringMapBucket);
+ assertNotNull(stringOrdinalsBucket);
+ assertEquals(bucket.getDocCount(), doubleBucket.getDocCount());
+ assertEquals(bucket.getDocCount(), stringMapBucket.getDocCount());
+ assertEquals(bucket.getDocCount(), stringOrdinalsBucket.getDocCount());
+ }
+ }
+
+ // Duel between histograms and scripted terms
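+ // The terms script buckets each value into floor(value / interval), so a terms bucket with
+ // key k must line up with the histogram bucket keyed k * interval (checked at the end).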
+ public void testDuelTermsHistogram() throws Exception {
+ createIndex("idx");
+
+ final int numDocs = atLeast(1000);
+ final int maxNumTerms = randomIntBetween(10, 2000);
+ final int interval = randomIntBetween(1, 100);
+
+ final Integer[] values = new Integer[maxNumTerms];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = randomInt(maxNumTerms * 3) - maxNumTerms;
+ }
+
+ for (int i = 0; i < numDocs; ++i) {
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .field("num", randomDouble())
+ .startArray("values");
+ final int numValues = randomInt(4);
+ for (int j = 0; j < numValues; ++j) {
+ source = source.value(randomFrom(values));
+ }
+ source = source.endArray().endObject();
+ client().prepareIndex("idx", "type").setSource(source).execute().actionGet();
+ }
+ assertNoFailures(client().admin().indices().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenient()).execute().get());
+
+ SearchResponse resp = client().prepareSearch("idx")
+ .addAggregation(terms("terms").field("values").script("floor(_value / interval)").param("interval", interval).size(maxNumTerms))
+ .addAggregation(histogram("histo").field("values").interval(interval))
+ .execute().actionGet();
+
+ assertThat(resp.getFailedShards(), equalTo(0));
+
+ Terms terms = resp.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ Histogram histo = resp.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(terms.getBuckets().size(), equalTo(histo.getBuckets().size()));
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ final long key = bucket.getKeyAsNumber().longValue() * interval;
+ final Histogram.Bucket histoBucket = histo.getBucketByKey(key);
+ assertEquals(bucket.getDocCount(), histoBucket.getDocCount());
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/BytesRefHashTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/BytesRefHashTests.java
new file mode 100644
index 0000000..bd354d7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/BytesRefHashTests.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.ObjectLongMap;
+import com.carrotsearch.hppc.ObjectLongOpenHashMap;
+import com.carrotsearch.hppc.cursors.ObjectLongCursor;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util._TestUtil;
+import org.elasticsearch.common.util.BigArraysTests;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.*;
+import java.util.Map.Entry;
+
+public class BytesRefHashTests extends ElasticsearchTestCase {
+
+ BytesRefHash hash;
+
+ private void newHash() {
+ if (hash != null) {
+ hash.release();
+ }
+ // Test high load factors to make sure that collision resolution works fine
+ final float maxLoadFactor = 0.6f + randomFloat() * 0.39f;
+ hash = new BytesRefHash(randomIntBetween(0, 100), maxLoadFactor, BigArraysTests.randomCacheRecycler());
+ }
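+
+ // Contract exercised throughout these tests: add(key) returns the new id (>= 0) on first
+ // insertion and -1 - existingId when the key is already present; find(key) returns the id
+ // of an existing key, or a value < 0 if it is absent.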
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ newHash();
+ }
+
+ public void testDuell() {
+ final int len = randomIntBetween(1, 100000);
+ final BytesRef[] values = new BytesRef[len];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = new BytesRef(randomAsciiOfLength(5));
+ }
+ final ObjectLongMap<BytesRef> valueToId = new ObjectLongOpenHashMap<BytesRef>();
+ final BytesRef[] idToValue = new BytesRef[values.length];
+ final int iters = randomInt(1000000);
+ for (int i = 0; i < iters; ++i) {
+ final BytesRef value = randomFrom(values);
+ if (valueToId.containsKey(value)) {
+ assertEquals(- 1 - valueToId.get(value), hash.add(value, value.hashCode()));
+ } else {
+ assertEquals(valueToId.size(), hash.add(value, value.hashCode()));
+ idToValue[valueToId.size()] = value;
+ valueToId.put(value, valueToId.size());
+ }
+ }
+
+ assertEquals(valueToId.size(), hash.size());
+ for (Iterator<ObjectLongCursor<BytesRef>> iterator = valueToId.iterator(); iterator.hasNext(); ) {
+ final ObjectLongCursor<BytesRef> next = iterator.next();
+ assertEquals(next.value, hash.find(next.key, next.key.hashCode()));
+ }
+
+ for (long i = 0; i < hash.capacity(); ++i) {
+ final long id = hash.id(i);
+ BytesRef spare = new BytesRef();
+ if (id >= 0) {
+ hash.get(id, spare);
+ assertEquals(idToValue[(int) id], spare);
+ }
+ }
+ hash.release();
+ }
+
+ // START - tests borrowed from LUCENE
+
+ /**
+ * Test method for {@link org.apache.lucene.util.BytesRefHash#size()}.
+ */
+ @Test
+ public void testSize() {
+ BytesRef ref = new BytesRef();
+ int num = atLeast(2);
+ for (int j = 0; j < num; j++) {
+ final int mod = 1 + randomInt(40);
+ for (int i = 0; i < 797; i++) {
+ String str;
+ do {
+ str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000);
+ } while (str.length() == 0);
+ ref.copyChars(str);
+ long count = hash.size();
+ long key = hash.add(ref);
+ if (key < 0)
+ assertEquals(hash.size(), count);
+ else
+ assertEquals(hash.size(), count + 1);
+ if (i % mod == 0) {
+ newHash();
+ }
+ }
+ }
+ hash.release();
+ }
+
+ /**
+ * Test method for
+ * {@link org.apache.lucene.util.BytesRefHash#get(int, BytesRef)}
+ * .
+ */
+ @Test
+ public void testGet() {
+ BytesRef ref = new BytesRef();
+ BytesRef scratch = new BytesRef();
+ int num = atLeast(2);
+ for (int j = 0; j < num; j++) {
+ Map<String, Long> strings = new HashMap<String, Long>();
+ int uniqueCount = 0;
+ for (int i = 0; i < 797; i++) {
+ String str;
+ do {
+ str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000);
+ } while (str.length() == 0);
+ ref.copyChars(str);
+ long count = hash.size();
+ long key = hash.add(ref);
+ if (key >= 0) {
+ assertNull(strings.put(str, Long.valueOf(key)));
+ assertEquals(uniqueCount, key);
+ uniqueCount++;
+ assertEquals(hash.size(), count + 1);
+ } else {
+ assertTrue((-key)-1 < count);
+ assertEquals(hash.size(), count);
+ }
+ }
+ for (Entry<String, Long> entry : strings.entrySet()) {
+ ref.copyChars(entry.getKey());
+ assertEquals(ref, hash.get(entry.getValue().longValue(), scratch));
+ }
+ newHash();
+ }
+ hash.release();
+ }
+
+ /**
+ * Test method for
+ * {@link org.apache.lucene.util.BytesRefHash#add(org.apache.lucene.util.BytesRef)}
+ * .
+ */
+ @Test
+ public void testAdd() {
+ BytesRef ref = new BytesRef();
+ BytesRef scratch = new BytesRef();
+ int num = atLeast(2);
+ for (int j = 0; j < num; j++) {
+ Set<String> strings = new HashSet<String>();
+ int uniqueCount = 0;
+ for (int i = 0; i < 797; i++) {
+ String str;
+ do {
+ str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000);
+ } while (str.length() == 0);
+ ref.copyChars(str);
+ long count = hash.size();
+ long key = hash.add(ref);
+
+ if (key >= 0) {
+ assertTrue(strings.add(str));
+ assertEquals(uniqueCount, key);
+ assertEquals(hash.size(), count + 1);
+ uniqueCount++;
+ } else {
+ assertFalse(strings.add(str));
+ assertTrue((-key)-1 < count);
+ assertEquals(str, hash.get((-key)-1, scratch).utf8ToString());
+ assertEquals(count, hash.size());
+ }
+ }
+
+ assertAllIn(strings, hash);
+ newHash();
+ }
+ hash.release();
+ }
+
+ @Test
+ public void testFind() throws Exception {
+ BytesRef ref = new BytesRef();
+ BytesRef scratch = new BytesRef();
+ int num = atLeast(2);
+ for (int j = 0; j < num; j++) {
+ Set<String> strings = new HashSet<String>();
+ int uniqueCount = 0;
+ for (int i = 0; i < 797; i++) {
+ String str;
+ do {
+ str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000);
+ } while (str.length() == 0);
+ ref.copyChars(str);
+ long count = hash.size();
+ long key = hash.find(ref); //hash.add(ref);
+ if (key >= 0) { // string found in hash
+ assertFalse(strings.add(str));
+ assertTrue(key < count);
+ assertEquals(str, hash.get(key, scratch).utf8ToString());
+ assertEquals(count, hash.size());
+ } else {
+ key = hash.add(ref);
+ assertTrue(strings.add(str));
+ assertEquals(uniqueCount, key);
+ assertEquals(hash.size(), count + 1);
+ uniqueCount++;
+ }
+ }
+
+ assertAllIn(strings, hash);
+ newHash();
+ }
+ hash.release();
+ }
+
+ private void assertAllIn(Set<String> strings, BytesRefHash hash) {
+ BytesRef ref = new BytesRef();
+ BytesRef scratch = new BytesRef();
+ long count = hash.size();
+ for (String string : strings) {
+ ref.copyChars(string);
+ long key = hash.add(ref); // add again to check duplicates
+ assertEquals(string, hash.get((-key)-1, scratch).utf8ToString());
+ assertEquals(count, hash.size());
+ assertTrue("key: " + key + " count: " + count + " string: " + string,
+ key < count);
+ }
+ }
+
+ // END - tests borrowed from LUCENE
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java
new file mode 100644
index 0000000..60b0275
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java
@@ -0,0 +1,1077 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ * Integration tests for the date_histogram aggregation.
+ */
+public class DateHistogramTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ private DateTime date(int month, int day) {
+ return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC);
+ }
+
+ private DateTime date(String date) {
+ return DateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime(date);
+ }
+
+ private IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception {
+ return client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("value", value)
+ .field("date", date(month, day))
+ .startArray("dates").value(date(month, day)).value(date(month + 1, day + 1)).endArray()
+ .endObject());
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+ // TODO: would be nice to have more random data here
+ indexRandom(true,
+ indexDoc(1, 2, 1), // date: Jan 2, dates: Jan 2, Feb 3
+ indexDoc(2, 2, 2), // date: Feb 2, dates: Feb 2, Mar 3
+ indexDoc(2, 15, 3), // date: Feb 15, dates: Feb 15, Mar 16
+ indexDoc(3, 2, 4), // date: Mar 2, dates: Mar 2, Apr 3
+ indexDoc(3, 15, 5), // date: Mar 15, dates: Mar 15, Apr 16
+ indexDoc(3, 23, 6)); // date: Mar 23, dates: Mar 23, Apr 24
+ ensureSearchable();
+ }
+
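+ // Randomly picks one of the equivalent getBucketByKey lookups (DateTime object, raw millis,
+ // millis as a string, or a formatted date string) so every lookup path gets exercised.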
+ private static DateHistogram.Bucket getBucket(DateHistogram histogram, DateTime key) {
+ return getBucket(histogram, key, DateFieldMapper.Defaults.DATE_TIME_FORMATTER.format());
+ }
+
+ private static DateHistogram.Bucket getBucket(DateHistogram histogram, DateTime key, String format) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ return histogram.getBucketByKey(key);
+ }
+ return histogram.getBucketByKey(key.getMillis());
+ }
+ if (randomBoolean()) {
+ return histogram.getBucketByKey("" + key.getMillis());
+ }
+ return histogram.getBucketByKey(Joda.forPattern(format).printer().print(key));
+ }
+
+ @Test
+ public void singleValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ DateHistogram.Bucket bucket = getBucket(histo, key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key.getMillis()));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = getBucket(histo, key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key.getMillis()));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = getBucket(histo, key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key.getMillis()));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ @Test
+ public void singleValuedField_WithPostTimeZone() throws Exception {
+ SearchResponse response;
+ if (randomBoolean()) {
+ response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).postZone("-01:00"))
+ .execute().actionGet();
+ } else {
+
+ // check the post_zone setting given as an int (-1 is equivalent to "-01:00")
+
+ response = client().prepareSearch("idx")
+ .addAggregation(new AbstractAggregationBuilder("histo", "date_histogram") {
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ return builder.startObject(name)
+ .startObject(type)
+ .field("field", "date")
+ .field("interval", "1d")
+ .field("post_zone", -1)
+ .endObject()
+ .endObject();
+ }
+ })
+ .execute().actionGet();
+ }
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(6));
+
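+ // With post_zone "-01:00", each of the six distinct dates lands in its own day bucket,
+ // keyed at midnight in the +01:00 time zone.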
+ long key = new DateTime(2012, 1, 2, 0, 0, DateTimeZone.forID("+01:00")).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 2, 0, 0, DateTimeZone.forID("+01:00")).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 15, 0, 0, DateTimeZone.forID("+01:00")).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 2, 0, 0, DateTimeZone.forID("+01:00")).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 15, 0, 0, DateTimeZone.forID("+01:00")).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 23, 0, 0, DateTimeZone.forID("+01:00")).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ @Test
+ public void singleValuedField_OrderedByKeyAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.KEY_ASC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 0;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByKeyDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.KEY_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 2;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByCountAsc() throws Exception {
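+ // Doc counts per month are Jan=1, Feb=2, Mar=3 (see init()), so ordering by ascending
+ // count coincides with the ascending key order asserted below.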
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.COUNT_ASC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 0;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByCountDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.COUNT_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 2;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.MONTH)
+ .subAggregation(sum("sum").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(1.0));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(5.0));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(15.0));
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.MONTH)
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 1, 2, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 2, 15, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 3, 23, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationAsc() throws Exception {
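+ // Note: the sub-aggregation is a max but is named "sum". The per-month max of "value" is
+ // Jan=1, Feb=3, Mar=6, so ordering by it ascending matches key order.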
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.aggregation("sum", true))
+ .subAggregation(max("sum").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 0;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.aggregation("sum", false))
+ .subAggregation(max("sum").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 2;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregationAsc_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.aggregation("stats", "sum", true))
+ .subAggregation(stats("stats").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 0;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregationDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.aggregation("stats", "sum", false))
+ .subAggregation(stats("stats").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 2;
+ for (DateHistogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(new DateTime(2012, i+1, 1, 0, 0, DateTimeZone.UTC).getMillis()));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .script("new DateTime(_value).plusMonths(1).getMillis()")
+ .interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ long key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ /*
+ [ Jan 2, Feb 3]
+ [ Feb 2, Mar 3]
+ [ Feb 15, Mar 16]
+ [ Mar 2, Apr 3]
+ [ Mar 15, Apr 16]
+ [ Mar 23, Apr 24]
+ */
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("dates").interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ @Test
+ public void multiValuedField_OrderedByKeyDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("dates")
+ .interval(DateHistogram.Interval.MONTH)
+ .order(DateHistogram.Order.COUNT_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ List<DateHistogram.Bucket> buckets = new ArrayList<DateHistogram.Bucket>(histo.getBuckets());
+
+ DateHistogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = buckets.get(3);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+
+ /**
+ * The script shifts each document's date values forward by one month:
+ *
+ * doc 1: [ Feb 2, Mar 3]
+ * doc 2: [ Mar 2, Apr 3]
+ * doc 3: [ Mar 15, Apr 16]
+ * doc 4: [ Apr 2, May 3]
+ * doc 5: [ Apr 15, May 16]
+ * doc 6: [ Apr 23, May 24]
+ */
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("dates")
+ .script("new DateTime(_value, DateTimeZone.UTC).plusMonths(1).getMillis()")
+ .interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ long key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ key = new DateTime(2012, 5, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ /**
+ * The script shifts each document's date values forward by one month:
+ *
+ * doc 1: [ Feb 2, Mar 3]
+ * doc 2: [ Mar 2, Apr 3]
+ * doc 3: [ Mar 15, Apr 16]
+ * doc 4: [ Apr 2, May 3]
+ * doc 5: [ Apr 15, May 16]
+ * doc 6: [ Apr 23, May 24]
+ *
+ */
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("dates")
+ .script("new DateTime(_value, DateTimeZone.UTC).plusMonths(1).getMillis()")
+ .interval(DateHistogram.Interval.MONTH)
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ long key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 3, 3, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 4, 16, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 5, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 5, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 5, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
+ /**
+ * Jan 2
+ * Feb 2
+ * Feb 15
+ * Mar 2
+ * Mar 15
+ * Mar 23
+ */
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").script("doc['date'].value").interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .script("doc['date'].value")
+ .interval(DateHistogram.Interval.MONTH)
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 1, 2, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 2, 15, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 3, 23, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
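+ // Each document's 'dates' array holds two months, so a single document can land in two buckets and the counts sum past the document count.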
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").script("doc['dates'].values").interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ /*
+ [ Jan 2, Feb 3]
+ [ Feb 2, Mar 3]
+ [ Feb 15, Mar 16]
+ [ Mar 2, Apr 3]
+ [ Mar 15, Apr 16]
+ [ Mar 23, Apr 24]
+ */
+
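+ // With multi-valued input a bucket's inherited max can come from a later month than the bucket key (e.g. the Jan bucket's max is Feb 3).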
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .script("doc['dates'].values")
+ .interval(DateHistogram.Interval.MONTH)
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 2, 3, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 3, 16, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 4, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 4, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
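+ // An index without the 'date' mapping still answers the aggregation, just with zero buckets.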
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ long key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC).getMillis();
+ bucket = histo.getBucketByKey(key);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
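+ // minDocCount(0) keeps the empty histogram bucket at key 1; its nested date histogram has no values to bucket, hence no buckets.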
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(dateHistogram("date_histo").interval(1)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ DateHistogram dateHisto = bucket.getAggregations().get("date_histo");
+ assertThat(dateHisto, Matchers.notNullValue());
+ assertThat(dateHisto.getName(), equalTo("date_histo"));
+ assertThat(dateHisto.getBuckets().isEmpty(), is(true));
+ }
+
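+ // Five docs indexed hourly from 2014-03-11T00:00 UTC; pre_zone -02:00 shifts the first two hours into the 2014-03-10 day bucket.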
+ @Test
+ public void singleValue_WithPreZone() throws Exception {
+ prepareCreate("idx2").addMapping("type", "date", "type=date").execute().actionGet();
+ IndexRequestBuilder[] reqs = new IndexRequestBuilder[5];
+ DateTime date = date("2014-03-11T00:00:00+00:00");
+ for (int i = 0; i < reqs.length; i++) {
+ reqs[i] = client().prepareIndex("idx2", "type", "" + i).setSource(jsonBuilder().startObject().field("date", date).endObject());
+ date = date.plusHours(1);
+ }
+ indexRandom(true, reqs);
+
+ SearchResponse response = client().prepareSearch("idx2")
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date_histo")
+ .field("date")
+ .preZone("-2:00")
+ .interval(DateHistogram.Interval.DAY)
+ .format("yyyy-MM-dd"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().getTotalHits(), equalTo(5l));
+
+ DateHistogram histo = response.getAggregations().get("date_histo");
+ Collection<? extends DateHistogram.Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(2));
+
+ DateHistogram.Bucket bucket = histo.getBucketByKey("2014-03-10");
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = histo.getBucketByKey("2014-03-11");
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
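+ // Same data as above, but preZoneAdjustLargeInterval(true) keeps the zone offset in the bucket keys, so day boundaries sit at 02:00 rather than midnight.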
+ @Test
+    public void singleValue_WithPreZone_WithAdjustLargeInterval() throws Exception {
+ prepareCreate("idx2").addMapping("type", "date", "type=date").execute().actionGet();
+ IndexRequestBuilder[] reqs = new IndexRequestBuilder[5];
+ DateTime date = date("2014-03-11T00:00:00+00:00");
+ for (int i = 0; i < reqs.length; i++) {
+ reqs[i] = client().prepareIndex("idx2", "type", "" + i).setSource(jsonBuilder().startObject().field("date", date).endObject());
+ date = date.plusHours(1);
+ }
+ indexRandom(true, reqs);
+
+ SearchResponse response = client().prepareSearch("idx2")
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date_histo")
+ .field("date")
+ .preZone("-2:00")
+ .interval(DateHistogram.Interval.DAY)
+ .preZoneAdjustLargeInterval(true)
+ .format("yyyy-MM-dd'T'HH:mm:ss"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().getTotalHits(), equalTo(5l));
+
+ DateHistogram histo = response.getAggregations().get("date_histo");
+ Collection<? extends DateHistogram.Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(2));
+
+ DateHistogram.Bucket bucket = histo.getBucketByKey("2014-03-10T02:00:00");
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = histo.getBucketByKey("2014-03-11T02:00:00");
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java
new file mode 100644
index 0000000..01e81cd
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java
@@ -0,0 +1,1058 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.date.DateRange;
+import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeBuilder;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.min.Min;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsNull.nullValue;
+
+/**
+ *
+ */
+public class DateRangeTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ private static IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception {
+ return client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("value", value)
+ .field("date", date(month, day))
+ .startArray("dates").value(date(month, day)).value(date(month + 1, day + 1)).endArray()
+ .endObject());
+ }
+
+ private static DateTime date(int month, int day) {
+ return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC);
+ }
+
+ int numDocs;
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+
+ numDocs = randomIntBetween(7, 20);
+
+ List<IndexRequestBuilder> docs = new ArrayList<IndexRequestBuilder>();
+ docs.addAll(Arrays.asList(
+ indexDoc(1, 2, 1), // Jan 2
+ indexDoc(2, 2, 2), // Feb 2
+ indexDoc(2, 15, 3), // Feb 15
+ indexDoc(3, 2, 4), // Mar 2
+ indexDoc(3, 15, 5), // Mar 15
+ indexDoc(3, 23, 6))); // Mar 23
+
+ // dummy docs
+ for (int i = docs.size(); i < numDocs; ++i) {
+ docs.add(indexDoc(randomIntBetween(6, 10), randomIntBetween(1, 20), randomInt(100)));
+ }
+
+ indexRandom(true, docs);
+ ensureSearchable();
+ }
+
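+ // Range bounds given as date-math expressions; all fixture docs date to 2012, within the last 50 years but more than a year ago, so only "recently" is populated.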
+ @Test
+ public void dateMath() throws Exception {
+ DateRangeBuilder rangeBuilder = dateRange("range");
+ if (randomBoolean()) {
+ rangeBuilder.field("date");
+ } else {
+ rangeBuilder.script("doc['date'].value");
+ }
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(rangeBuilder
+ .addUnboundedTo("a long time ago", "now-50y")
+ .addRange("recently", "now-50y", "now-1y")
+ .addUnboundedFrom("last year", "now-1y"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ List<DateRange.Bucket> buckets = new ArrayList<DateRange.Bucket>(range.getBuckets());
+
+ DateRange.Bucket bucket = buckets.get(0);
+ assertThat(bucket.getKey(), equalTo("a long time ago"));
+ assertThat(bucket.getDocCount(), equalTo(0L));
+
+ bucket = buckets.get(1);
+ assertThat(bucket.getKey(), equalTo("recently"));
+ assertThat(bucket.getDocCount(), equalTo((long) numDocs));
+
+ bucket = buckets.get(2);
+ assertThat(bucket.getKey(), equalTo("last year"));
+ assertThat(bucket.getDocCount(), equalTo(0L));
+ }
+
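+ // Default bucket keys are "from-to" strings in full ISO format, with "*" marking an unbounded end.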
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ @Test
+ public void singleValueField_WithStringDates() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo("2012-02-15")
+ .addRange("2012-02-15", "2012-03-15")
+ .addUnboundedFrom("2012-03-15"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
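+ // A custom format changes both how the string bounds are parsed and how the bucket keys are rendered.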
+ @Test
+ public void singleValueField_WithStringDates_WithCustomFormat() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .format("yyyy-MM-dd")
+ .addUnboundedTo("2012-02-15")
+ .addRange("2012-02-15", "2012-03-15")
+ .addUnboundedFrom("2012-03-15"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15-2012-03-15");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15-2012-03-15"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
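+ // "2012-02-15||+1M" anchors date math on a literal date and resolves to 2012-03-15, so the buckets match the plain-date test above.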
+ @Test
+ public void singleValueField_WithDateMath() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo("2012-02-15")
+ .addRange("2012-02-15", "2012-02-15||+1M")
+ .addUnboundedFrom("2012-02-15||+1M"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
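+ // Explicit keys ("r1".."r3") replace the generated from-to key strings.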
+ @Test
+ public void singleValueField_WithCustomKey() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo("r1", date(2, 15))
+ .addRange("r2", date(2, 15), date(3, 15))
+ .addUnboundedFrom("r3", date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("r1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("r2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r2"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("r3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r3"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ /*
+ Jan 2, 1
+ Feb 2, 2
+ Feb 15, 3
+ Mar 2, 4
+ Mar 15, 5
+ Mar 23, 6
+ */
+
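+ // The fixture comment above pairs each date with its 'value': r1 covers Jan 2 and Feb 2, hence a sum of 1 + 2.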
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo("r1", date(2, 15))
+ .addRange("r2", date(2, 15), date(3, 15))
+ .addUnboundedFrom("r3", date(3, 15))
+ .subAggregation(sum("sum").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("r1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) 1 + 2));
+
+ bucket = range.getBucketByKey("r2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r2"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) 3 + 4));
+
+ bucket = range.getBucketByKey("r3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r3"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo("r1", date(2, 15))
+ .addRange("r2", date(2, 15), date(3, 15))
+ .addUnboundedFrom("r3", date(3, 15))
+ .subAggregation(min("min")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("r1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Min min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(1, 2).getMillis()));
+
+ bucket = range.getBucketByKey("r2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r2"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(2, 15).getMillis()));
+
+ bucket = range.getBucketByKey("r3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r3"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(3, 15).getMillis()));
+ }
+
+ /*
+ Jan 2, Feb 3, 1
+ Feb 2, Mar 3, 2
+ Feb 15, Mar 16, 3
+ Mar 2, Apr 3, 4
+ Mar 15, Apr 16, 5
+ Mar 23, Apr 24, 6
+ */
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("dates")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+ }
+
+ /*
+ Feb 2, Mar 3, 1
+ Mar 2, Apr 3, 2
+ Mar 15, Apr 16, 3
+ Apr 2, May 3, 4
+ Apr 15, May 16, 5
+ Apr 23, May 24, 6
+ */
+
+
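+ // The value script shifts every date forward one month (shifted values listed above), which moves one document out of the first bucket.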
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("dates")
+ .script("new DateTime(_value.longValue(), DateTimeZone.UTC).plusMonths(1).getMillis()")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 1l));
+ }
+
+ /*
+ Feb 2, Mar 3, 1
+ Mar 2, Apr 3, 2
+ Mar 15, Apr 16, 3
+ Apr 2, May 3, 4
+ Apr 15, May 16, 5
+ Apr 23, May 24, 6
+ */
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .field("dates")
+ .script("new DateTime(_value.longValue(), DateTimeZone.UTC).plusMonths(1).getMillis()")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15))
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(3, 3).getMillis()));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(4, 3).getMillis()));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 1l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .script("doc['date'].value")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
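+ // The max sub-aggregation inherits the script's values; r3's max is only null-checked because the randomized dummy docs fall into the unbounded range.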
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .script("doc['date'].value")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15))
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(2, 2).getMillis()));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(3, 2).getMillis()));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ }
+
+ /*
+ Jan 2, Feb 3, 1
+ Feb 2, Mar 3, 2
+ Feb 15, Mar 16, 3
+ Mar 2, Apr 3, 4
+ Mar 15, Apr 16, 5
+ Mar 23, Apr 24, 6
+ */
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .script("doc['dates'].values")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+ .script("doc['dates'].values")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15))
+ .subAggregation(min("min")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Min min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(1, 2).getMillis()));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(2, 2).getMillis()));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+ min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(2, 15).getMillis()));
+ }
+
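+ // Unlike the unmapped date histogram (zero buckets), an unmapped date range still reports every configured range, each with a zero count.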
+ @Test
+ public void unmapped() throws Exception {
+ client().admin().cluster().prepareHealth("idx_unmapped").setWaitForYellowStatus().execute().actionGet();
+
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void unmapped_WithStringDates() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo("2012-02-15")
+ .addRange("2012-02-15", "2012-03-15")
+ .addUnboundedFrom("2012-03-15"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
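+ // Mapped and unmapped indices searched together: unmapped shards contribute empty results, so counts match the mapped-only case.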
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ DateRange range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ DateRange.Bucket bucket = range.getBucketByKey("*-2012-02-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsDate(), nullValue());
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(2, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(2, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getToAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("2012-03-15T00:00:00.000Z-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) date(3, 15).getMillis()));
+ assertThat(bucket.getFromAsDate(), equalTo(date(3, 15)));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsDate(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
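+ // minDocCount(0) keeps the empty histogram bucket; its nested date_range still reports the configured "0-1" range with a zero count.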
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(dateRange("date_range").addRange("0-1", 0, 1)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ DateRange dateRange = bucket.getAggregations().get("date_range");
+ assertThat(dateRange, Matchers.notNullValue());
+ assertThat(dateRange.getName(), equalTo("date_range"));
+ List<DateRange.Bucket> buckets = new ArrayList<DateRange.Bucket>(dateRange.getBuckets());
+ assertThat(buckets.size(), is(1));
+ assertThat(buckets.get(0).getKey(), equalTo("0-1"));
+ assertThat(buckets.get(0).getFrom().doubleValue(), equalTo(0.0));
+ assertThat(buckets.get(0).getTo().doubleValue(), equalTo(1.0));
+ assertThat(buckets.get(0).getDocCount(), equalTo(0l));
+ assertThat(buckets.get(0).getAggregations().asList().isEmpty(), is(true));
+
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java
new file mode 100644
index 0000000..a386558
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java
@@ -0,0 +1,880 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class DoubleTermsTests extends ElasticsearchIntegrationTest {
+
+ private static final int NUM_DOCS = 5; // TODO: randomize the size?
+ private static final String SINGLE_VALUED_FIELD_NAME = "d_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "d_values";
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+
+        IndexRequestBuilder[] lowCardBuilders = new IndexRequestBuilder[NUM_DOCS];
+        for (int i = 0; i < lowCardBuilders.length; i++) {
+            lowCardBuilders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, (double) i)
+ .startArray(MULTI_VALUED_FIELD_NAME).value((double)i).value(i + 1d).endArray()
+ .endObject());
+        }
+        indexRandom(randomBoolean(), lowCardBuilders);
+ IndexRequestBuilder[] highCardBuilders = new IndexRequestBuilder[100]; // TODO: randomize the size?
+ for (int i = 0; i < highCardBuilders.length; i++) {
+ highCardBuilders[i] = client().prepareIndex("idx", "high_card_type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, (double) i)
+ .startArray(MULTI_VALUED_FIELD_NAME).value((double)i).value(i + 1d).endArray()
+ .endObject());
+ }
+ indexRandom(true, highCardBuilders);
+
+ createIndex("idx_unmapped");
+ ensureSearchable();
+ }
+
+ private String key(Terms.Bucket bucket) {
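+        // randomly exercise both bucket key accessors (getKey and getKeyAsText) so both code paths are covered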
+ return randomBoolean() ? bucket.getKey() : bucket.getKeyAsText().string();
+ }
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_WithMaxSize() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .size(20)
+ .order(Terms.Order.term(true))) // we need to sort by terms cause we're checking the first 20 values
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(20));
+
+ for (int i = 0; i < 20; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_OrderedByTermAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.term(true)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValueField_OrderedByTermDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.term(false)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(sum("sum").field(MULTI_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat((long) sum.getValue(), equalTo(i+i+1l));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .script("_value + 1"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i+1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i+1));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value + 1"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i+1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i+1));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript_NotUnique() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("(long) _value / 1000 + 1"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(1));
+
+ Terms.Bucket bucket = terms.getBucketByKey("1.0");
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("1.0"));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ }
+
+ /*
+
+ [1, 2]
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+
+ 1 - count: 1 - sum: 1
+ 2 - count: 2 - sum: 4
+ 3 - count: 2 - sum: 6
+ 4 - count: 2 - sum: 8
+ 5 - count: 2 - sum: 10
+ 6 - count: 1 - sum: 6
+
+ */
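+    // note: the inherited "sum" sub-aggregation below sums *all* (shifted) values of every doc in a
+    // bucket, not just the occurrences of the bucket's own term; e.g. bucket 2.0 holds the docs
+    // [1, 2] and [2, 3] (after the +1 shift) and therefore sums 1 + 2 + 2 + 3 = 8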
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value + 1")
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i+1d)));
+ assertThat(bucket.getKeyAsNumber().doubleValue(), equalTo(i+1d));
+ final long count = i == 0 || i == 5 ? 1 : 2;
+ double s = 0;
+ for (int j = 0; j < NUM_DOCS; ++j) {
+ if (i == j || i == j+1) {
+ s += j + 1;
+ s += j+1 + 1;
+ }
+ }
+ assertThat(bucket.getDocCount(), equalTo(count));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(s));
+ }
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited_NoExplicitType() throws Exception {
+
+        // since no type is explicitly defined, ES will assume all values returned by the script are strings (bytes),
+        // so the aggregation should fail, since the "sum" aggregation can only operate on numeric values.
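+        // (the next test demonstrates the remedy: declaring an explicit value type via valueType(Terms.ValueType.DOUBLE))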
+
+ try {
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+
+ fail("expected to fail as sub-aggregation sum requires a numeric value source context, but there is none");
+
+ } catch (Exception e) {
+ // expected
+ }
+
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited_WithExplicitType() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .valueType(Terms.ValueType.DOUBLE)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i + ".0");
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i + ".0"));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ final long count = i == 0 || i == 5 ? 1 : 2;
+ double s = 0;
+ for (int j = 0; j < NUM_DOCS; ++j) {
+ if (i == j || i == j+1) {
+ s += j;
+ s += j+1;
+ }
+ }
+ assertThat(bucket.getDocCount(), equalTo(count));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(s));
+ }
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .size(randomInt(5)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped", "idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
+ .subAggregation(terms("terms")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Terms terms = bucket.getAggregations().get("terms");
+ assertThat(terms, Matchers.notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().isEmpty(), is(true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMissingSubAggregation() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", true))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByNonMetricsSubAggregation() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("filter", true))
+ .subAggregation(filter("filter").filter(FilterBuilders.termFilter("foo", "bar")))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation which is not of a metrics type");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+    public void singleValuedField_OrderedByMultiValuedSubAggregation_WithUnknownMetric() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.foo", true))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " +
+ "with an unknown specified metric to order by");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithoutMetric() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats", true))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " +
+ "where the metric name is not specified");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 4; i >= 0; i--) {
+
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 4; i >= 0; i--) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueExtendedStatsAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.variance", asc))
+ .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ ExtendedStats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void script_Score() {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(functionScoreQuery(matchAllQuery()).add(ScoreFunctionBuilders.scriptFunction("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")))
+ .addAggregation(terms("terms")
+ .script("ceil(_doc.score/3)")
+ ).execute().actionGet();
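+        // the function score makes each doc's score equal its SINGLE_VALUED_FIELD_NAME value (0..4),
+        // so ceil(score / 3) yields 0, 1, 1, 1, 2; three buckets, with bucket 1.0 holding three docs
+        // and the others one each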
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(3));
+
+ for (int i = 0; i < 3; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(i == 1 ? 3L : 1L));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterTests.java
new file mode 100644
index 0000000..3332dd6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterTests.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.matchAllFilter;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class FilterTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ int numDocs, numTag1Docs;
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ createIndex("idx2");
+ numDocs = randomIntBetween(5, 20);
+ numTag1Docs = randomIntBetween(1, numDocs - 1);
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < numTag1Docs; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i + 1)
+ .field("tag", "tag1")
+ .endObject()));
+ }
+ for (int i = numTag1Docs; i < numDocs; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i)
+ .field("tag", "tag2")
+ .field("name", "name" + i)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+ ensureGreen();
+ ensureSearchable();
+ }
+
+ @Test
+ public void simple() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(filter("tag1").filter(termFilter("tag", "tag1")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Filter filter = response.getAggregations().get("tag1");
+ assertThat(filter, notNullValue());
+ assertThat(filter.getName(), equalTo("tag1"));
+ assertThat(filter.getDocCount(), equalTo((long) numTag1Docs));
+ }
+
+ @Test
+ public void withSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(filter("tag1")
+ .filter(termFilter("tag", "tag1"))
+ .subAggregation(avg("avg_value").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Filter filter = response.getAggregations().get("tag1");
+ assertThat(filter, notNullValue());
+ assertThat(filter.getName(), equalTo("tag1"));
+ assertThat(filter.getDocCount(), equalTo((long) numTag1Docs));
+
+ long sum = 0;
+ for (int i = 0; i < numTag1Docs; ++i) {
+ sum += i + 1;
+ }
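+        // docs tagged "tag1" were indexed with value = i + 1, so the expected average is sum / numTag1Docs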
+ assertThat(filter.getAggregations().asList().isEmpty(), is(false));
+ Avg avgValue = filter.getAggregations().get("avg_value");
+ assertThat(avgValue, notNullValue());
+ assertThat(avgValue.getName(), equalTo("avg_value"));
+ assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs));
+ }
+
+ @Test
+ public void withContextBasedSubAggregation() throws Exception {
+
+ try {
+ client().prepareSearch("idx")
+ .addAggregation(filter("tag1")
+ .filter(termFilter("tag", "tag1"))
+ .subAggregation(avg("avg_value")))
+ .execute().actionGet();
+
+ fail("expected execution to fail - an attempt to have a context based numeric sub-aggregation, but there is not value source" +
+ "context which the sub-aggregation can inherit");
+
+ } catch (ElasticsearchException ese) {
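+            // expected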
+ }
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(filter("filter").filter(matchAllFilter())))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Filter filter = bucket.getAggregations().get("filter");
+ assertThat(filter, Matchers.notNullValue());
+ assertThat(filter.getName(), equalTo("filter"));
+ assertThat(filter.getDocCount(), is(0l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceTests.java
new file mode 100644
index 0000000..3d0c3ad
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceTests.java
@@ -0,0 +1,439 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistance;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class GeoDistanceTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ private IndexRequestBuilder indexCity(String idx, String name, String... latLons) throws Exception {
+ XContentBuilder source = jsonBuilder().startObject().field("city", name);
+ source.startArray("location");
+ for (int i = 0; i < latLons.length; i++) {
+ source.value(latLons[i]);
+ }
+ source.endArray();
+ source = source.endObject();
+ return client().prepareIndex(idx, "type").setSource(source);
+ }
+
+ @Before
+ public void init() throws Exception {
+ prepareCreate("idx")
+ .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ prepareCreate("idx-multi")
+ .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ createIndex("idx_unmapped");
+
+ List<IndexRequestBuilder> cities = new ArrayList<IndexRequestBuilder>();
+ cities.addAll(Arrays.asList(
+ // below 500km
+ indexCity("idx", "utrecht", "52.0945, 5.116"),
+ indexCity("idx", "haarlem", "52.3890, 4.637"),
+ // above 500km, below 1000km
+ indexCity("idx", "berlin", "52.540, 13.409"),
+ indexCity("idx", "prague", "50.097679, 14.441314"),
+ // above 1000km
+ indexCity("idx", "tel-aviv", "32.0741, 34.777")));
+
+ // random cities with no location
+ for (String cityName : Arrays.asList("london", "singapour", "tokyo", "milan")) {
+ if (randomBoolean()) {
+ cities.add(indexCity("idx", cityName));
+ }
+ }
+ indexRandom(true, cities);
+
+ cities.clear();
+ cities.addAll(Arrays.asList(
+ indexCity("idx-multi", "city1", "52.3890, 4.637", "50.097679,14.441314"), // first point is within the ~17.5km, the second is ~710km
+ indexCity("idx-multi", "city2", "52.540, 13.409", "52.0945, 5.116"), // first point is ~576km, the second is within the ~35km
+ indexCity("idx-multi", "city3", "32.0741, 34.777"))); // above 1000km
+
+ // random cities with no location
+ for (String cityName : Arrays.asList("london", "singapour", "tokyo", "milan")) {
+            if (randomBoolean()) {
+ cities.add(indexCity("idx-multi", cityName));
+ }
+ }
+ indexRandom(true, cities);
+
+ ensureSearchable();
+ }
+
+ @Test
+ public void simple() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoDistance geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ GeoDistance.Bucket bucket = geoDist.getBucketByKey("*-500.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-500.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(0.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("500.0-1000.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("1000.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ @Test
+ public void simple_WithCustomKeys() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo("ring1", 500)
+ .addRange("ring2", 500, 1000)
+ .addUnboundedFrom("ring3", 1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoDistance geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ GeoDistance.Bucket bucket = geoDist.getBucketByKey("ring1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("ring1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(0.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("ring2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("ring2"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("ring3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("ring3"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ client().admin().cluster().prepareHealth("idx_unmapped").setWaitForYellowStatus().execute().actionGet();
+
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoDistance geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ GeoDistance.Bucket bucket = geoDist.getBucketByKey("*-500.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-500.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(0.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = geoDist.getBucketByKey("500.0-1000.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = geoDist.getBucketByKey("1000.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoDistance geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ GeoDistance.Bucket bucket = geoDist.getBucketByKey("*-500.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-500.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(0.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("500.0-1000.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("1000.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+
+ @Test
+ public void withSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000)
+ .subAggregation(terms("cities").field("city")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoDistance geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ GeoDistance.Bucket bucket = geoDist.getBucketByKey("*-500.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-500.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(0.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Terms cities = bucket.getAggregations().get("cities");
+ assertThat(cities, Matchers.notNullValue());
+ Set<String> names = Sets.newHashSet();
+ for (Terms.Bucket city : cities.getBuckets()) {
+ names.add(city.getKey());
+ }
+ assertThat(names.contains("utrecht") && names.contains("haarlem"), is(true));
+
+ bucket = geoDist.getBucketByKey("500.0-1000.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ cities = bucket.getAggregations().get("cities");
+ assertThat(cities, Matchers.notNullValue());
+ names = Sets.newHashSet();
+ for (Terms.Bucket city : cities.getBuckets()) {
+ names.add(city.getKey());
+ }
+ assertThat(names.contains("berlin") && names.contains("prague"), is(true));
+
+ bucket = geoDist.getBucketByKey("1000.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ cities = bucket.getAggregations().get("cities");
+ assertThat(cities, Matchers.notNullValue());
+ names = Sets.newHashSet();
+ for (Terms.Bucket city : cities.getBuckets()) {
+ names.add(city.getKey());
+ }
+ assertThat(names.contains("tel-aviv"), is(true));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer", "location", "type=geo_point").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i * 2)
+ .field("location", "52.0945, 5.116")
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(geoDistance("geo_dist").field("location").point("52.3760, 4.894").addRange("0-100", 0.0, 100.0)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+        GeoDistance geoDistance = bucket.getAggregations().get("geo_dist");
+        assertThat(geoDistance, Matchers.notNullValue());
+        assertThat(geoDistance.getName(), equalTo("geo_dist"));
+        List<GeoDistance.Bucket> buckets = new ArrayList<GeoDistance.Bucket>(geoDistance.getBuckets());
+ assertThat(buckets.size(), is(1));
+ assertThat(buckets.get(0).getKey(), equalTo("0-100"));
+ assertThat(buckets.get(0).getFrom().doubleValue(), equalTo(0.0));
+ assertThat(buckets.get(0).getTo().doubleValue(), equalTo(100.0));
+ assertThat(buckets.get(0).getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void multiValues() throws Exception {
+ SearchResponse response = client().prepareSearch("idx-multi")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .distanceType(org.elasticsearch.common.geo.GeoDistance.ARC)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ GeoDistance geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ GeoDistance.Bucket bucket = geoDist.getBucketByKey("*-500.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-500.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(0.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("500.0-1000.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(500.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = geoDist.getBucketByKey("1000.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java
new file mode 100644
index 0000000..5897045
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java
@@ -0,0 +1,244 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.ObjectIntMap;
+import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.carrotsearch.hppc.cursors.ObjectIntCursor;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.GeoBoundingBoxFilterBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.geohashGrid;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class GeoHashGridTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ private IndexRequestBuilder indexCity(String name, String latLon) throws Exception {
+ XContentBuilder source = jsonBuilder().startObject().field("city", name);
+ if (latLon != null) {
+ source = source.field("location", latLon);
+ }
+ source = source.endObject();
+ return client().prepareIndex("idx", "type").setSource(source);
+ }
+
+
+ ObjectIntMap<String> expectedDocCountsForGeoHash = null;
+ int highestPrecisionGeohash = 12;
+ int numRandomPoints = 100;
+
+ String smallestGeoHash = null;
+
+ @Before
+ public void init() throws Exception {
+ prepareCreate("idx")
+ .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ createIndex("idx_unmapped");
+
+ List<IndexRequestBuilder> cities = new ArrayList<IndexRequestBuilder>();
+ Random random = getRandom();
+ expectedDocCountsForGeoHash = new ObjectIntOpenHashMap<String>(numRandomPoints * 2);
+ for (int i = 0; i < numRandomPoints; i++) {
+ //generate random point
+ double lat = (180d * random.nextDouble()) - 90d;
+ double lng = (360d * random.nextDouble()) - 180d;
+ String randomGeoHash = GeoHashUtils.encode(lat, lng, highestPrecisionGeohash);
+ //Index at the highest resolution
+ cities.add(indexCity(randomGeoHash, lat + ", " + lng));
+ expectedDocCountsForGeoHash.put(randomGeoHash, expectedDocCountsForGeoHash.getOrDefault(randomGeoHash, 0) + 1);
+ //Update expected doc counts for all resolutions..
+ for (int precision = highestPrecisionGeohash - 1; precision > 0; precision--) {
+ String hash = GeoHashUtils.encode(lat, lng, precision);
+ if ((smallestGeoHash == null) || (hash.length() < smallestGeoHash.length())) {
+ smallestGeoHash = hash;
+ }
+ expectedDocCountsForGeoHash.put(hash, expectedDocCountsForGeoHash.getOrDefault(hash, 0) + 1);
+ }
+ }
+ indexRandom(true, cities);
+ ensureSearchable();
+ }
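The bookkeeping in init() leans on the prefix property of geohashes: encoding the same point at a lower precision yields a prefix of the higher-precision hash, so each indexed point also counts toward every enclosing cell. A minimal equivalent sketch using truncation, written as if inside the indexing loop of init() where lat and lng are in scope (assuming the standard prefix property):

    // Equivalent expected-count bookkeeping via prefix truncation (sketch only):
    // the precision-p geohash of a point is the first p characters of its precision-12 hash.
    String full = GeoHashUtils.encode(lat, lng, highestPrecisionGeohash);
    for (int p = highestPrecisionGeohash; p > 0; p--) {
        String cell = full.substring(0, p);
        expectedDocCountsForGeoHash.put(cell, expectedDocCountsForGeoHash.getOrDefault(cell, 0) + 1);
    }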
+
+
+ @Test
+ public void simple() throws Exception {
+ for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geohashGrid("geohashgrid")
+ .field("location")
+ .precision(precision)
+ )
+ .execute().actionGet();
+
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+ for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) {
+ String geohash = cell.getKey();
+
+ long bucketCount = cell.getDocCount();
+ int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash);
+                assertTrue("geohash " + geohash + " should have a non-zero doc count", bucketCount != 0);
+                assertEquals("Geohash " + geohash + " has wrong doc count",
+                        expectedBucketCount, bucketCount);
+ }
+ }
+ }
+
+ @Test
+ public void filtered() throws Exception {
+ GeoBoundingBoxFilterBuilder bbox = new GeoBoundingBoxFilterBuilder("location");
+ bbox.topLeft(smallestGeoHash).bottomRight(smallestGeoHash).filterName("bbox");
+ for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(
+ AggregationBuilders.filter("filtered").filter(bbox)
+ .subAggregation(
+ geohashGrid("geohashgrid")
+ .field("location")
+ .precision(precision)
+ )
+ )
+ .execute().actionGet();
+
+ assertThat(response.getFailedShards(), equalTo(0));
+
+
+ Filter filter = response.getAggregations().get("filtered");
+
+ GeoHashGrid geoGrid = filter.getAggregations().get("geohashgrid");
+ for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) {
+ String geohash = cell.getKey();
+ long bucketCount = cell.getDocCount();
+ int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash);
+                assertTrue("geohash " + geohash + " should have a non-zero doc count", bucketCount != 0);
+                assertTrue("Buckets must be filtered", geohash.startsWith(smallestGeoHash));
+                assertEquals("Geohash " + geohash + " has wrong doc count",
+                        expectedBucketCount, bucketCount);
+
+ }
+ }
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(geohashGrid("geohashgrid")
+ .field("location")
+ .precision(precision)
+ )
+ .execute().actionGet();
+
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+ assertThat(geoGrid.getBuckets().size(), equalTo(0));
+ }
+
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(geohashGrid("geohashgrid")
+ .field("location")
+ .precision(precision)
+ )
+ .execute().actionGet();
+
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+ for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) {
+ String geohash = cell.getKey();
+
+ long bucketCount = cell.getDocCount();
+ int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash);
+                assertTrue("geohash " + geohash + " should have a non-zero doc count", bucketCount != 0);
+                assertEquals("Geohash " + geohash + " has wrong doc count",
+                        expectedBucketCount, bucketCount);
+ }
+ }
+ }
+
+ @Test
+ public void testTopMatch() throws Exception {
+ for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geohashGrid("geohashgrid")
+ .field("location")
+ .size(1)
+ .shardSize(100)
+ .precision(precision)
+ )
+ .execute().actionGet();
+
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+ //Check we only have one bucket with the best match for that resolution
+ assertThat(geoGrid.getBuckets().size(), equalTo(1));
+ for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) {
+ String geohash = cell.getKey();
+ long bucketCount = cell.getDocCount();
+ int expectedBucketCount = 0;
+ for (ObjectIntCursor<String> cursor : expectedDocCountsForGeoHash) {
+ if (cursor.key.length() == precision) {
+ expectedBucketCount = Math.max(expectedBucketCount, cursor.value);
+ }
+ }
+                assertTrue("geohash " + geohash + " should have a non-zero doc count", bucketCount != 0);
+                assertEquals("Geohash " + geohash + " has wrong doc count",
+                        expectedBucketCount, bucketCount);
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java
new file mode 100644
index 0000000..05b0ea5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class GlobalTests extends ElasticsearchIntegrationTest {
+
+ int numDocs;
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ createIndex("idx2");
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ numDocs = randomIntBetween(3, 20);
+ for (int i = 0; i < numDocs / 2; i++) {
+            builders.add(client().prepareIndex("idx", "type", "" + (i + 1)).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i + 1)
+ .field("tag", "tag1")
+ .endObject()));
+ }
+ for (int i = numDocs / 2; i < numDocs; i++) {
+            builders.add(client().prepareIndex("idx", "type", "" + (i + 1)).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i + 1)
+ .field("tag", "tag2")
+ .field("name", "name" + i+1)
+ .endObject()));
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @Test
+ public void withStatsSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.termQuery("tag", "tag1"))
+ .addAggregation(global("global")
+ .subAggregation(stats("value_stats").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Global global = response.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getDocCount(), equalTo((long) numDocs));
+ assertThat(global.getAggregations().asList().isEmpty(), is(false));
+
+ Stats stats = global.getAggregations().get("value_stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("value_stats"));
+ long sum = 0;
+ for (int i = 0; i < numDocs; ++i) {
+ sum += i + 1;
+ }
+ assertThat(stats.getAvg(), equalTo((double) sum / numDocs));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo((double) numDocs));
+ assertThat(stats.getCount(), equalTo((long) numDocs));
+ assertThat(stats.getSum(), equalTo((double) sum));
+ }
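Since the indexed values are exactly 1..numDocs, the expectation loop above just computes the triangular number; the closed forms below are an equivalent way to read the assertions (sketch only, names are illustrative):

    // Closed-form equivalents of the expectations above (values are exactly 1..numDocs):
    long expectedSum = (long) numDocs * (numDocs + 1) / 2;   // 1 + 2 + ... + numDocs
    double expectedAvg = (double) expectedSum / numDocs;
    double expectedMin = 1.0;
    double expectedMax = numDocs;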
+
+ @Test
+ public void nonTopLevel() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx")
+ .setQuery(QueryBuilders.termQuery("tag", "tag1"))
+ .addAggregation(global("global")
+ .subAggregation(global("inner_global")))
+ .execute().actionGet();
+
+ fail("expected to fail executing non-top-level global aggregator. global aggregations are only allowed as top level" +
+ "aggregations");
+
+ } catch (ElasticsearchException ese) {
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java
new file mode 100644
index 0000000..b9f38d3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java
@@ -0,0 +1,790 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.LongOpenHashSet;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.*;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class HistogramTests extends ElasticsearchIntegrationTest {
+
+ private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "l_values";
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ int numDocs;
+ int interval;
+ int numValueBuckets, numValuesBuckets;
+ long[] valueCounts, valuesCounts;
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+
+ numDocs = randomIntBetween(6, 20);
+ interval = randomIntBetween(2, 5);
+
+ numValueBuckets = numDocs / interval + 1;
+ valueCounts = new long[numValueBuckets];
+ for (int i = 0; i < numDocs; ++i) {
+ final int bucket = (i + 1) / interval;
+ ++valueCounts[bucket];
+ }
+
+ numValuesBuckets = (numDocs + 1) / interval + 1;
+ valuesCounts = new long[numValuesBuckets];
+ for (int i = 0; i < numDocs; ++i) {
+ final int bucket1 = (i + 1) / interval;
+ final int bucket2 = (i + 2) / interval;
+ ++valuesCounts[bucket1];
+ if (bucket1 != bucket2) {
+ ++valuesCounts[bucket2];
+ }
+ }
+
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i + 1)
+ .startArray(MULTI_VALUED_FIELD_NAME).value(i + 1).value(i + 2).endArray()
+ .field("tag", "tag" + i)
+ .endObject());
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
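To make the bucket bookkeeping in init() concrete: a document value v falls into the bucket whose key is (v / interval) * interval, using integer division. A small worked sketch; the numbers 6 and 3 are chosen for illustration, not taken from the test:

    // Illustration only: exampleDocs = 6, exampleInterval = 3 -> values 1..6.
    int exampleDocs = 6, exampleInterval = 3;
    long[] counts = new long[exampleDocs / exampleInterval + 1];   // 3 buckets
    for (int v = 1; v <= exampleDocs; v++) {
        counts[v / exampleInterval]++;   // bucket key is (v / exampleInterval) * exampleInterval
    }
    // counts == {2, 3, 1}: values 1,2 -> key 0; 3,4,5 -> key 3; 6 -> key 6.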
+
+ @Test
+ public void singleValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByKeyAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_ASC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByKeyDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+            Histogram.Bucket bucket = buckets.get(numValueBuckets - i - 1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByCountAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.COUNT_ASC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongOpenHashSet buckets = new LongOpenHashSet();
+ List<Histogram.Bucket> histoBuckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ long previousCount = Long.MIN_VALUE;
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = histoBuckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = bucket.getKeyAsNumber().longValue();
+ assertEquals(0, key % interval);
+ assertTrue(buckets.add(key));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)]));
+ assertThat(bucket.getDocCount(), greaterThanOrEqualTo(previousCount));
+ previousCount = bucket.getDocCount();
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByCountDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.COUNT_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongOpenHashSet buckets = new LongOpenHashSet();
+ List<Histogram.Bucket> histoBuckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ long previousCount = Long.MAX_VALUE;
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = histoBuckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = bucket.getKeyAsNumber().longValue();
+ assertEquals(0, key % interval);
+ assertTrue(buckets.add(key));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)]));
+ assertThat(bucket.getDocCount(), lessThanOrEqualTo(previousCount));
+ previousCount = bucket.getDocCount();
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == i) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == i) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.aggregation("sum", true))
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongOpenHashSet visited = new LongOpenHashSet();
+ double previousSum = Double.NEGATIVE_INFINITY;
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = bucket.getKeyAsNumber().longValue();
+ assertTrue(visited.add(key));
+ int b = (int) (key / interval);
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[b]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == b) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ assertThat(sum.getValue(), greaterThanOrEqualTo(previousSum));
+ previousSum = s;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.aggregation("sum", false))
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongOpenHashSet visited = new LongOpenHashSet();
+ double previousSum = Double.POSITIVE_INFINITY;
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = bucket.getKeyAsNumber().longValue();
+ assertTrue(visited.add(key));
+ int b = (int) (key / interval);
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[b]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == b) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ assertThat(sum.getValue(), lessThanOrEqualTo(previousSum));
+ previousSum = s;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregationAsc_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.aggregation("stats.sum", true))
+ .subAggregation(stats("stats")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongOpenHashSet visited = new LongOpenHashSet();
+ double previousSum = Double.NEGATIVE_INFINITY;
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = bucket.getKeyAsNumber().longValue();
+ assertTrue(visited.add(key));
+ int b = (int) (key / interval);
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[b]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == b) {
+ s += j + 1;
+ }
+ }
+ assertThat(stats.getSum(), equalTo((double) s));
+ assertThat(stats.getSum(), greaterThanOrEqualTo(previousSum));
+ previousSum = s;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregationDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.aggregation("stats.sum", false))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongOpenHashSet visited = new LongOpenHashSet();
+ double previousSum = Double.POSITIVE_INFINITY;
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = bucket.getKeyAsNumber().longValue();
+ assertTrue(visited.add(key));
+ int b = (int) (key / interval);
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[b]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == b) {
+ s += j + 1;
+ }
+ }
+ assertThat(stats.getSum(), equalTo((double) s));
+ assertThat(stats.getSum(), lessThanOrEqualTo(previousSum));
+ previousSum = s;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).script("_value + 1").interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+        final int numBuckets = (numDocs + 1) / interval - 2 / interval + 1; // note: 2 / interval is integer division (1 when interval == 2, otherwise 0)
+ final long[] counts = new long[(numDocs + 1) / interval + 1];
+ for (int i = 0; i < numDocs ; ++i) {
+ ++counts[(i + 2) / interval];
+ }
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numBuckets));
+
+ for (int i = 2 / interval; i <= (numDocs + 1) / interval; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(counts[i]));
+ }
+ }
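Continuing the same illustrative numbers (6 docs, interval 3): the value script shifts values to 2..7, so the first populated key is (2 / 3) * 3 = 0, the last is (7 / 3) * 3 = 6, and the bucket count matches the formula above (sketch only):

    // Illustration only: 6 docs, interval 3, script "_value + 1" -> values 2..7.
    int first = 2 / 3;                   // 0, index of the first populated bucket
    int last = (6 + 1) / 3;              // 2, index of the last populated bucket
    int numBuckets = last - first + 1;   // 3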
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets));
+
+ for (int i = 0; i < numValuesBuckets; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ }
+ }
+
+ @Test
+ public void multiValuedField_OrderedByKeyDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets));
+
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValuesBuckets; ++i) {
+            Histogram.Bucket bucket = buckets.get(numValuesBuckets - i - 1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).script("_value + 1").interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ final int numBuckets = (numDocs + 2) / interval - 2 / interval + 1;
+ final long[] counts = new long[(numDocs + 2) / interval + 1];
+ for (int i = 0; i < numDocs ; ++i) {
+ final int bucket1 = (i + 2) / interval;
+ final int bucket2 = (i + 3) / interval;
+ ++counts[bucket1];
+ if (bucket1 != bucket2) {
+ ++counts[bucket2];
+ }
+ }
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numBuckets));
+
+ for (int i = 2 / interval; i <= (numDocs + 2) / interval; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(counts[i]));
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).script("_value + 1").interval(interval)
+ .subAggregation(terms(MULTI_VALUED_FIELD_NAME).order(Terms.Order.term(true))))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ final int numBuckets = (numDocs + 2) / interval - 2 / interval + 1;
+ final long[] counts = new long[(numDocs + 2) / interval + 1];
+ for (int i = 0; i < numDocs ; ++i) {
+ final int bucket1 = (i + 2) / interval;
+ final int bucket2 = (i + 3) / interval;
+ ++counts[bucket1];
+ if (bucket1 != bucket2) {
+ ++counts[bucket2];
+ }
+ }
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numBuckets));
+
+ for (int i = 2 / interval; i < (numDocs + 2) / interval; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(counts[i]));
+ Terms terms = bucket.getAggregations().get(MULTI_VALUED_FIELD_NAME);
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo(MULTI_VALUED_FIELD_NAME));
+ int minTerm = Math.max(2, i * interval - 1);
+ int maxTerm = Math.min(numDocs + 2, (i + 1) * interval);
+ assertThat(terms.getBuckets().size(), equalTo(maxTerm - minTerm + 1));
+ Iterator<Terms.Bucket> iter = terms.getBuckets().iterator();
+ for (int j = minTerm; j <= maxTerm; ++j) {
+ assertThat(iter.next().getKeyAsNumber().longValue(), equalTo((long) j));
+ }
+ }
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value").interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ }
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value").interval(interval)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == i) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ }
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values").interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets));
+
+ for (int i = 0; i < numValuesBuckets; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ }
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values").interval(interval)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets));
+
+ for (int i = 0; i < numValuesBuckets; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == i || (j + 2) / interval == i) {
+ s += j + 1;
+ s += j + 2;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ }
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = histo.getBucketByKey(i * interval);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ }
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i * 2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
+ .subAggregation(histogram("sub_histo").interval(1l)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ histo = bucket.getAggregations().get("sub_histo");
+ assertThat(histo, Matchers.notNullValue());
+ assertThat(histo.getName(), equalTo("sub_histo"));
+ assertThat(histo.getBuckets().isEmpty(), is(true));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/IPv4RangeTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/IPv4RangeTests.java
new file mode 100644
index 0000000..436b476
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/IPv4RangeTests.java
@@ -0,0 +1,865 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.mapper.ip.IpFieldMapper;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.ipv4.IPv4Range;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsNull.nullValue;
+
+/**
+ *
+ */
+public class IPv4RangeTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ @Before
+ public void init() throws Exception {
+ prepareCreate("idx")
+ .addMapping("type", "ip", "type=ip", "ips", "type=ip")
+ .execute().actionGet();
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[255]; // TODO randomize the size?
+ // TODO randomize the values in the docs?
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("ip", "10.0.0." + (i))
+ .startArray("ips").value("10.0.0." + i).value("10.0.0." + (i + 1)).endArray()
+ .field("value", (i < 100 ? 1 : i < 200 ? 2 : 3)) // 100 1's, 100 2's, and 55 3's
+ .endObject());
+ }
+ indexRandom(true, builders);
+ createIndex("idx_unmapped");
+ ensureSearchable();
+ }
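The range bounds below are asserted against IpFieldMapper.ipToLong. For orientation, the conventional dotted-quad-to-long mapping looks like the sketch below; this is illustrative only, not the mapper's actual implementation:

    // Sketch of the conventional IPv4 -> long mapping (illustrative only).
    static long ipToLongSketch(String ip) {
        long value = 0;
        for (String octet : ip.split("\\.")) {
            value = (value << 8) | Integer.parseInt(octet);
        }
        return value;   // "10.0.0.100" -> 167772260
    }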
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ }
+
+ @Test
+ public void singleValueField_WithMaskRange() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addMaskRange("10.0.0.0/25")
+ .addMaskRange("10.0.0.128/25"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(2));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("10.0.0.0/25");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.0/25"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.0")));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.0"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.128")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.128"));
+ assertThat(bucket.getDocCount(), equalTo(128l));
+
+ bucket = range.getBucketByKey("10.0.0.128/25");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.128/25"));
+ assertThat((long) bucket.getFrom().doubleValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.128")));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.128"));
+ assertThat((long) bucket.getTo().doubleValue(), equalTo(IpFieldMapper.ipToLong("10.0.1.0"))); // range is exclusive on the to side
+ assertThat(bucket.getToAsString(), equalTo("10.0.1.0"));
+        assertThat(bucket.getDocCount(), equalTo(127l)); // docs 10.0.0.128 through 10.0.0.254 (no doc indexed for 10.0.0.255)
+ }
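The two doc counts in the mask test (128 and 127) follow directly from CIDR arithmetic: a /25 covers 2^(32-25) = 128 addresses, and the index holds one doc per address from 10.0.0.0 through 10.0.0.254, so the upper half-range is one doc short. A tiny sketch of the span computation (the helper name is illustrative):

    // Illustrative only: number of addresses covered by a CIDR prefix.
    static long cidrSpan(int prefixLength) {
        return 1L << (32 - prefixLength);   // /25 -> 128 addresses
    }
    // 10.0.0.0/25   covers 10.0.0.0   .. 10.0.0.127 -> 128 indexed docs
    // 10.0.0.128/25 covers 10.0.0.128 .. 10.0.0.255 -> 127 docs (indexed "ip" values stop at 10.0.0.254)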
+
+ @Test
+ public void singleValueField_WithCustomKey() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addUnboundedTo("r1", "10.0.0.100")
+ .addRange("r2", "10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("r3", "10.0.0.200"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("r1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("r2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r2"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("r3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r3"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200")
+ .subAggregation(sum("sum").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) 100));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) 200));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+        assertThat(sum.getValue(), equalTo((double) (55 * 3))); // the 55 docs in this bucket all have value 3
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200")
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.99")));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.199")));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.254")));
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .script("_value")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ }
+
+ /*
+ [0, 1]
+ [1, 2]
+ [2, 3]
+ ...
+ [99, 100]
+ [100, 101]
+ [101, 102]
+ ...
+ [199, 200]
+ [200, 201]
+ [201, 202]
+ ...
+ [254, 255]
+ */
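+
+ // Each doc i above carries ips 10.0.0.i and 10.0.0.(i + 1), which is where the
+ // multi-valued doc counts asserted in the tests below come from: *-10.0.0.100
+ // matches docs 0..99 (100 docs), 10.0.0.100-10.0.0.200 matches docs 99..199
+ // (101 docs -- doc 99 qualifies through its second value), and 10.0.0.200-*
+ // matches docs 199..254 (56 docs).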
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ips")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(101l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(56l));
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ips")
+ .script("_value")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(101l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(56l));
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .field("ips")
+ .script("_value")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200")
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
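+ // the max sub-aggregator is inherited here, so it sees every value of each
+ // matching document, not just the values that fall inside a bucket's range;
+ // doc 99 carries [10.0.0.99, 10.0.0.100], which is why the first bucket's max
+ // lands exactly on its upper bound below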
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, Matchers.notNullValue());
+ assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.100")));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(101l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, Matchers.notNullValue());
+ assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.200")));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(56l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, Matchers.notNullValue());
+ assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.255")));
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .script("doc['ip'].value")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .script("doc['ip'].value")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200")
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.99")));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.199")));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.254")));
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .script("doc['ips'].values")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(101l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(56l));
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(ipRange("range")
+ .script("doc['ips'].values")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200")
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, Matchers.notNullValue());
+ assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.100")));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(101l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, Matchers.notNullValue());
+ assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.200")));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(56l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, Matchers.notNullValue());
+ assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.255")));
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(ipRange("range")
+ .field("ip")
+ .addUnboundedTo("10.0.0.100")
+ .addRange("10.0.0.100", "10.0.0.200")
+ .addUnboundedFrom("10.0.0.200"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ IPv4Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ IPv4Range.Bucket bucket = range.getBucketByKey("*-10.0.0.100");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.100-10.0.0.200");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+ assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getTo().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getDocCount(), equalTo(100l));
+
+ bucket = range.getBucketByKey("10.0.0.200-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("10.0.0.200-*"));
+ assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(55l));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer", "ip", "type=ip").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i * 2)
+ .field("ip", "10.0.0.5")
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
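+ // values 0 and 2 leave the histogram bucket at key 1 empty; minDocCount(0)
+ // keeps that bucket in the response, and the ip_range sub-aggregation inside
+ // it must then report a doc count of 0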
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(ipRange("ip_range").field("ip").addRange("r1", "10.0.0.1", "10.0.0.10")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ IPv4Range range = bucket.getAggregations().get("ip_range");
+ assertThat(range, Matchers.notNullValue());
+ List<IPv4Range.Bucket> buckets = new ArrayList<IPv4Range.Bucket>(range.getBuckets());
+ assertThat(range.getName(), equalTo("ip_range"));
+ assertThat(buckets.size(), is(1));
+ assertThat(buckets.get(0).getKey(), equalTo("r1"));
+ assertThat(buckets.get(0).getFromAsString(), equalTo("10.0.0.1"));
+ assertThat(buckets.get(0).getToAsString(), equalTo("10.0.0.10"));
+ assertThat(buckets.get(0).getDocCount(), equalTo(0l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/LongHashTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/LongHashTests.java
new file mode 100644
index 0000000..c9c7d6c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/LongHashTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.LongLongMap;
+import com.carrotsearch.hppc.LongLongOpenHashMap;
+import com.carrotsearch.hppc.cursors.LongLongCursor;
+import org.elasticsearch.common.util.BigArraysTests;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Iterator;
+
+public class LongHashTests extends ElasticsearchTestCase {
+
+ public void testDuell() {
+ final Long[] values = new Long[randomIntBetween(1, 100000)];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = randomLong();
+ }
+ final LongLongMap valueToId = new LongLongOpenHashMap();
+ final long[] idToValue = new long[values.length];
+ // Test high load factors to make sure that collision resolution works fine
+ final float maxLoadFactor = 0.6f + randomFloat() * 0.39f;
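+ // 0.6f + randomFloat() * 0.39f keeps the load factor in [0.6, 0.99), dense
+ // enough that collisions are effectively guaranteed to occur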
+ final LongHash longHash = new LongHash(randomIntBetween(0, 100), maxLoadFactor, BigArraysTests.randomCacheRecycler());
+ final int iters = randomInt(1000000);
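+ // duel against the reference map: judging by the assertions below, add()
+ // returns the fresh id for an unseen key and -(1 + existingId) for a key
+ // that is already present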
+ for (int i = 0; i < iters; ++i) {
+ final Long value = randomFrom(values);
+ if (valueToId.containsKey(value)) {
+ assertEquals(-1 - valueToId.get(value), longHash.add(value));
+ } else {
+ assertEquals(valueToId.size(), longHash.add(value));
+ idToValue[valueToId.size()] = value;
+ valueToId.put(value, valueToId.size());
+ }
+ }
+
+ assertEquals(valueToId.size(), longHash.size());
+ for (Iterator<LongLongCursor> iterator = valueToId.iterator(); iterator.hasNext(); ) {
+ final LongLongCursor next = iterator.next();
+ assertEquals(next.value, longHash.find(next.key));
+ }
+
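+ // walk every slot: a non-negative id marks an occupied slot, and each live
+ // id must map back to the key stored there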
+ for (long i = 0; i < longHash.capacity(); ++i) {
+ final long id = longHash.id(i);
+ if (id >= 0) {
+ assertEquals(idToValue[(int) id], longHash.key(i));
+ }
+ }
+ longHash.release();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsTests.java
new file mode 100644
index 0000000..bb1baca
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsTests.java
@@ -0,0 +1,848 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class LongTermsTests extends ElasticsearchIntegrationTest {
+
+ private static final int NUM_DOCS = 5; // TODO randomize the size?
+ private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "l_values";
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ IndexRequestBuilder[] lowCardBuilders = new IndexRequestBuilder[NUM_DOCS];
+ for (int i = 0; i < lowCardBuilders.length; i++) {
+ lowCardBuilders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i)
+ .startArray(MULTI_VALUED_FIELD_NAME).value(i).value(i + 1).endArray()
+ .endObject());
+ }
+ indexRandom(randomBoolean(), lowCardBuilders);
+ IndexRequestBuilder[] highCardBuilders = new IndexRequestBuilder[100]; // TODO randomize the size?
+ for (int i = 0; i < highCardBuilders.length; i++) {
+ highCardBuilders[i] = client().prepareIndex("idx", "high_card_type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i)
+ .startArray(MULTI_VALUED_FIELD_NAME).value(i).value(i + 1).endArray()
+ .endObject());
+
+ }
+ indexRandom(true, highCardBuilders);
+ createIndex("idx_unmapped");
+ ensureSearchable();
+ }
+
+ private String key(Terms.Bucket bucket) {
+ return bucket.getKey();
+ }
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_WithMaxSize() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .size(20)
+ .order(Terms.Order.term(true))) // we need to sort by terms cause we're checking the first 20 values
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(20));
+
+ for (int i = 0; i < 20; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_OrderedByTermAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.term(true)))
+ .execute().actionGet();
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValueField_OrderedByTermDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.term(false)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(sum("sum").field(MULTI_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat((long) sum.getValue(), equalTo(i+i+1l));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .script("_value + 1"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i+1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i+1));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value - 1"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i - 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i-1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i-1));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript_NotUnique() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("floor(_value / 1000 + 1)"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(1));
+
+ Terms.Bucket bucket = terms.getBucketByKey("1.0");
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("1.0"));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ }
+
+ /*
+
+ values after the "_value + 1" script:
+
+ [1, 2]
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+
+ the inherited sum aggregates every (shifted) value of each matching doc:
+
+ 1 - count: 1 - sum: 3 (doc [1, 2])
+ 2 - count: 2 - sum: 8 (docs [1, 2] and [2, 3])
+ 3 - count: 2 - sum: 12
+ 4 - count: 2 - sum: 16
+ 5 - count: 2 - sum: 20
+ 6 - count: 1 - sum: 11 (doc [5, 6])
+
+ */
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value + 1")
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i+1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i+1));
+ final long count = i == 0 || i == 5 ? 1 : 2;
+ double s = 0;
+ for (int j = 0; j < NUM_DOCS; ++j) {
+ if (i == j || i == j + 1) {
+ // doc j holds [j, j + 1], shifted by the script to [j + 1, j + 2]
+ s += (j + 1) + (j + 2);
+ }
+ }
+ assertThat(bucket.getDocCount(), equalTo(count));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(s));
+ }
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited_NoExplicitType() throws Exception {
+
+ // since no type is explicitly defined, es will assume all values returned by the script to be strings (bytes),
+ // so the aggregation should fail, since the "sum" aggregation can only operate on numeric values.
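+ // (script_MultiValued_WithAggregatorInherited_WithExplicitType below works
+ // around this by declaring valueType(Terms.ValueType.LONG))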
+
+ try {
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ fail("expected to fail as sub-aggregation sum requires a numeric value source context, but there is none");
+
+ } catch (Exception e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited_WithExplicitType() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .valueType(Terms.ValueType.LONG)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ final long count = i == 0 || i == 5 ? 1 : 2;
+ double s = 0;
+ for (int j = 0; j < NUM_DOCS; ++j) {
+ if (i == j || i == j + 1) {
+ // doc j holds the values [j, j + 1]
+ s += j + (j + 1);
+ }
+ }
+ assertThat(bucket.getDocCount(), equalTo(count));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(s));
+ }
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .size(randomInt(5)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped", "idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
+ .subAggregation(terms("terms")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Terms terms = bucket.getAggregations().get("terms");
+ assertThat(terms, Matchers.notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().isEmpty(), is(true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMissingSubAggregation() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", true))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by a sub-aggregation that doesn't exist");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByNonMetricsSubAggregation() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("filter", true))
+ .subAggregation(filter("filter").filter(FilterBuilders.termFilter("foo", "bar")))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by a sub-aggregation which is not of a metrics type");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithUnknownMetric() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.foo", true))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by a multi-valued sub-aggregation " +
+ "with an unknown metric to order by");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithoutMetric() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats", true))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by a multi-valued sub-aggregation " +
+ "where the metric name is not specified");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 4; i >= 0; i--) {
+
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 4; i >= 0; i--) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueExtendedStatsAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.variance", asc))
+ .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ ExtendedStats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountTests.java
new file mode 100644
index 0000000..9acc300
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountTests.java
@@ -0,0 +1,381 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.LongSet;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.format.DateTimeFormat;
+import org.junit.Before;
+
+import java.util.*;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+
+public class MinDocCountTests extends ElasticsearchIntegrationTest {
+
+ private static final QueryBuilder QUERY = QueryBuilders.termQuery("match", true);
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ private int cardinality;
+
+ @Before
+ public void indexData() throws Exception {
+ createIndex("idx");
+
+ cardinality = randomIntBetween(8, 30);
+ final List<IndexRequestBuilder> indexRequests = new ArrayList<IndexRequestBuilder>();
+ final Set<String> stringTerms = new HashSet<String>();
+ final LongSet longTerms = new LongOpenHashSet();
+ final Set<String> dateTerms = new HashSet<String>();
+ for (int i = 0; i < cardinality; ++i) {
+ String stringTerm;
+ do {
+ stringTerm = RandomStrings.randomAsciiOfLength(getRandom(), 8);
+ } while (!stringTerms.add(stringTerm));
+ long longTerm;
+ do {
+ longTerm = randomInt(cardinality * 2);
+ } while (!longTerms.add(longTerm));
+ double doubleTerm = longTerm * Math.PI;
+ String dateTerm = DateTimeFormat.forPattern("yyyy-MM-dd").print(new DateTime(2014, 1, ((int) longTerm % 20) + 1, 0, 0));
+ final int frequency = randomBoolean() ? 1 : randomIntBetween(2, 20);
+ for (int j = 0; j < frequency; ++j) {
+ indexRequests.add(client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("s", stringTerm)
+ .field("l", longTerm)
+ .field("d", doubleTerm)
+ .field("date", dateTerm)
+ .field("match", randomBoolean())
+ .endObject()));
+ }
+ }
+ cardinality = stringTerms.size();
+
+ indexRandom(true, indexRequests);
+ ensureSearchable();
+ }
+
+ private enum Script {
+ NO {
+ @Override
+ TermsBuilder apply(TermsBuilder builder, String field) {
+ return builder.field(field);
+ }
+ },
+ YES {
+ @Override
+ TermsBuilder apply(TermsBuilder builder, String field) {
+ return builder.script("doc['" + field + "'].values");
+ }
+ };
+ abstract TermsBuilder apply(TermsBuilder builder, String field);
+ }
+
+ // check that terms2 is a subset of terms1
+ private void assertSubset(Terms terms1, Terms terms2, long minDocCount, int size, String include) {
+ final Matcher matcher = include == null ? null : Pattern.compile(include).matcher("");
+ final Iterator<Terms.Bucket> it1 = terms1.getBuckets().iterator();
+ final Iterator<Terms.Bucket> it2 = terms2.getBuckets().iterator();
+ int size2 = 0;
+ while (it1.hasNext()) {
+ final Terms.Bucket bucket1 = it1.next();
+ if (bucket1.getDocCount() >= minDocCount && (matcher == null || matcher.reset(bucket1.getKey()).matches())) {
+ if (size2++ == size) {
+ break;
+ }
+ assertTrue(it2.hasNext());
+ final Terms.Bucket bucket2 = it2.next();
+ assertEquals(bucket1.getKeyAsText(), bucket2.getKeyAsText());
+ assertEquals(bucket1.getDocCount(), bucket2.getDocCount());
+ }
+ }
+ assertFalse(it2.hasNext());
+ }
+
+ private void assertSubset(Histogram histo1, Histogram histo2, long minDocCount) {
+ final Iterator<? extends Histogram.Bucket> it2 = histo2.getBuckets().iterator();
+ for (Histogram.Bucket b1 : histo1.getBuckets()) {
+ if (b1.getDocCount() >= minDocCount) {
+ final Histogram.Bucket b2 = it2.next();
+ assertEquals(b1.getKeyAsNumber(), b2.getKeyAsNumber());
+ assertEquals(b1.getDocCount(), b2.getDocCount());
+ }
+ }
+ }
+
+ private void assertSubset(DateHistogram histo1, DateHistogram histo2, long minDocCount) {
+ final Iterator<? extends DateHistogram.Bucket> it2 = histo2.getBuckets().iterator();
+ for (DateHistogram.Bucket b1 : histo1.getBuckets()) {
+ if (b1.getDocCount() >= minDocCount) {
+ final DateHistogram.Bucket b2 = it2.next();
+ assertEquals(b1.getKeyAsNumber(), b2.getKeyAsNumber());
+ assertEquals(b1.getDocCount(), b2.getDocCount());
+ }
+ }
+ }
+
+ public void testStringTermAsc() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.term(true));
+ }
+
+ public void testStringScriptTermAsc() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.term(true));
+ }
+
+ public void testStringTermDesc() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.term(false));
+ }
+
+ public void testStringScriptTermDesc() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.term(false));
+ }
+
+ public void testStringCountAsc() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(true));
+ }
+
+ public void testStringScriptCountAsc() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(true));
+ }
+
+ public void testStringCountDesc() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(false));
+ }
+
+ public void testStringScriptCountDesc() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(false));
+ }
+
+ public void testStringCountAscWithInclude() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(true), ".*a.*");
+ }
+
+ public void testStringScriptCountAscWithInclude() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(true), ".*a.*");
+ }
+
+ public void testStringCountDescWithInclude() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(false), ".*a.*");
+ }
+
+ public void testStringScriptCountDescWithInclude() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(false), ".*a.*");
+ }
+
+ public void testLongTermAsc() throws Exception {
+ testMinDocCountOnTerms("l", Script.NO, Terms.Order.term(true));
+ }
+
+ public void testLongScriptTermAsc() throws Exception {
+ testMinDocCountOnTerms("l", Script.YES, Terms.Order.term(true));
+ }
+
+ public void testLongTermDesc() throws Exception {
+ testMinDocCountOnTerms("l", Script.NO, Terms.Order.term(false));
+ }
+
+ public void testLongScriptTermDesc() throws Exception {
+ testMinDocCountOnTerms("l", Script.YES, Terms.Order.term(false));
+ }
+
+ public void testLongCountAsc() throws Exception {
+ testMinDocCountOnTerms("l", Script.NO, Terms.Order.count(true));
+ }
+
+ public void testLongScriptCountAsc() throws Exception {
+ testMinDocCountOnTerms("l", Script.YES, Terms.Order.count(true));
+ }
+
+ public void testLongCountDesc() throws Exception {
+ testMinDocCountOnTerms("l", Script.NO, Terms.Order.count(false));
+ }
+
+ public void testLongScriptCountDesc() throws Exception {
+ testMinDocCountOnTerms("l", Script.YES, Terms.Order.count(false));
+ }
+
+ public void testDoubleTermAsc() throws Exception {
+ testMinDocCountOnTerms("d", Script.NO, Terms.Order.term(true));
+ }
+
+ public void testDoubleScriptTermAsc() throws Exception {
+ testMinDocCountOnTerms("d", Script.YES, Terms.Order.term(true));
+ }
+
+ public void testDoubleTermDesc() throws Exception {
+ testMinDocCountOnTerms("d", Script.NO, Terms.Order.term(false));
+ }
+
+ public void testDoubleScriptTermDesc() throws Exception {
+ testMinDocCountOnTerms("d", Script.YES, Terms.Order.term(false));
+ }
+
+ public void testDoubleCountAsc() throws Exception {
+ testMinDocCountOnTerms("d", Script.NO, Terms.Order.count(true));
+ }
+
+ public void testDoubleScriptCountAsc() throws Exception {
+ testMinDocCountOnTerms("d", Script.YES, Terms.Order.count(true));
+ }
+
+ public void testDoubleCountDesc() throws Exception {
+ testMinDocCountOnTerms("d", Script.NO, Terms.Order.count(false));
+ }
+
+ public void testDoubleScriptCountDesc() throws Exception {
+ testMinDocCountOnTerms("d", Script.YES, Terms.Order.count(false));
+ }
+
+ private void testMinDocCountOnTerms(String field, Script script, Terms.Order order) throws Exception {
+ testMinDocCountOnTerms(field, script, order, null);
+ }
+
+ private void testMinDocCountOnTerms(String field, Script script, Terms.Order order, String include) throws Exception {
+ // all terms
+ final SearchResponse allTermsResponse = client().prepareSearch("idx").setTypes("type")
+ .setSearchType(SearchType.COUNT)
+ .setQuery(QUERY)
+ .addAggregation(script.apply(terms("terms"), field)
+ .executionHint(StringTermsTests.randomExecutionHint())
+ .order(order)
+ .size(cardinality + randomInt(10))
+ .minDocCount(0))
+ .execute().actionGet();
+ final Terms allTerms = allTermsResponse.getAggregations().get("terms");
+ assertEquals(cardinality, allTerms.getBuckets().size());
+
+ for (long minDocCount = 0; minDocCount < 20; ++minDocCount) {
+ final int size = randomIntBetween(1, cardinality + 2);
+ final SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setSearchType(SearchType.COUNT)
+ .setQuery(QUERY)
+ .addAggregation(script.apply(terms("terms"), field)
+ .executionHint(StringTermsTests.randomExecutionHint())
+ .order(order)
+ .size(size)
+ .include(include)
+ .shardSize(cardinality + randomInt(10))
+ .minDocCount(minDocCount))
+ .execute().actionGet();
+ assertSubset(allTerms, (Terms) response.getAggregations().get("terms"), minDocCount, size, include);
+ }
+
+ }
+
+ public void testHistogramCountAsc() throws Exception {
+ testMinDocCountOnHistogram(Histogram.Order.COUNT_ASC);
+ }
+
+ public void testHistogramCountDesc() throws Exception {
+ testMinDocCountOnHistogram(Histogram.Order.COUNT_DESC);
+ }
+
+ public void testHistogramKeyAsc() throws Exception {
+ testMinDocCountOnHistogram(Histogram.Order.KEY_ASC);
+ }
+
+ public void testHistogramKeyDesc() throws Exception {
+ testMinDocCountOnHistogram(Histogram.Order.KEY_DESC);
+ }
+
+ public void testDateHistogramCountAsc() throws Exception {
+ testMinDocCountOnDateHistogram(Histogram.Order.COUNT_ASC);
+ }
+
+ public void testDateHistogramCountDesc() throws Exception {
+ testMinDocCountOnDateHistogram(Histogram.Order.COUNT_DESC);
+ }
+
+ public void testDateHistogramKeyAsc() throws Exception {
+ testMinDocCountOnDateHistogram(Histogram.Order.KEY_ASC);
+ }
+
+ public void testDateHistogramKeyDesc() throws Exception {
+ testMinDocCountOnDateHistogram(Histogram.Order.KEY_DESC);
+ }
+
+ private void testMinDocCountOnHistogram(Histogram.Order order) throws Exception {
+ final int interval = randomIntBetween(1, 3);
+ final SearchResponse allResponse = client().prepareSearch("idx").setTypes("type")
+ .setSearchType(SearchType.COUNT)
+ .setQuery(QUERY)
+ .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(0))
+ .execute().actionGet();
+
+ final Histogram allHisto = allResponse.getAggregations().get("histo");
+
+ for (long minDocCount = 0; minDocCount < 50; ++minDocCount) {
+ final SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setSearchType(SearchType.COUNT)
+ .setQuery(QUERY)
+ .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(minDocCount))
+ .execute().actionGet();
+ assertSubset(allHisto, (Histogram) response.getAggregations().get("histo"), minDocCount);
+ }
+
+ }
+
+ private void testMinDocCountOnDateHistogram(Histogram.Order order) throws Exception {
+ final int interval = randomIntBetween(1, 3);
+ final SearchResponse allResponse = client().prepareSearch("idx").setTypes("type")
+ .setSearchType(SearchType.COUNT)
+ .setQuery(QUERY)
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).order(order).minDocCount(0))
+ .execute().actionGet();
+
+ final DateHistogram allHisto = allResponse.getAggregations().get("histo");
+
+ for (long minDocCount = 0; minDocCount < 50; ++minDocCount) {
+ final SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setSearchType(SearchType.COUNT)
+ .setQuery(QUERY)
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).order(order).minDocCount(minDocCount))
+ .execute().actionGet();
+ assertSubset(allHisto, (DateHistogram) response.getAggregations().get("histo"), minDocCount);
+ }
+
+ }
+
+}
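
The assertSubset checks above pin down the contract under test: the buckets returned for a given minDocCount are exactly the minDocCount=0 buckets whose doc count reaches the threshold (and, for terms, whose key matches the include pattern), in the same order and truncated to the requested size. A self-contained reference model of that filtering, as a minimal sketch over plain Java collections (no Elasticsearch types, include filtering omitted):

    import java.util.LinkedHashMap;
    import java.util.Map;

    final class MinDocCountModel {
        // Filter the minDocCount=0 bucket list by doc count, preserve its
        // order, and stop once `size` buckets have been kept.
        static <K> Map<K, Long> expected(LinkedHashMap<K, Long> allBuckets, long minDocCount, int size) {
            LinkedHashMap<K, Long> kept = new LinkedHashMap<K, Long>();
            for (Map.Entry<K, Long> e : allBuckets.entrySet()) {
                if (e.getValue() >= minDocCount) {
                    kept.put(e.getKey(), e.getValue());
                    if (kept.size() == size) {
                        break;
                    }
                }
            }
            return kept;
        }
    }
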
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingTests.java
new file mode 100644
index 0000000..632f4e2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingTests.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.missing.Missing;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class MissingTests extends ElasticsearchIntegrationTest {
+
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ int numDocs, numDocsMissing, numDocsUnmapped;
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ numDocs = randomIntBetween(5, 20);
+ numDocsMissing = randomIntBetween(1, numDocs - 1);
+ for (int i = 0; i < numDocsMissing; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i)
+ .endObject()));
+ }
+ for (int i = numDocsMissing; i < numDocs; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("tag", "tag1")
+ .endObject()));
+ }
+
+ createIndex("unmapped_idx");
+ numDocsUnmapped = randomIntBetween(2, 5);
+ for (int i = 0; i < numDocsUnmapped; i++) {
+ builders.add(client().prepareIndex("unmapped_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i)
+ .endObject()));
+ }
+
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+ ensureGreen(); // wait until we are ready to serve requests
+ ensureSearchable();
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("unmapped_idx")
+ .addAggregation(missing("missing_tag").field("tag"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Missing missing = response.getAggregations().get("missing_tag");
+ assertThat(missing, notNullValue());
+ assertThat(missing.getName(), equalTo("missing_tag"));
+ assertThat(missing.getDocCount(), equalTo((long) numDocsUnmapped));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "unmapped_idx")
+ .addAggregation(missing("missing_tag").field("tag"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Missing missing = response.getAggregations().get("missing_tag");
+ assertThat(missing, notNullValue());
+ assertThat(missing.getName(), equalTo("missing_tag"));
+ assertThat(missing.getDocCount(), equalTo((long) numDocsMissing + numDocsUnmapped));
+ }
+
+ @Test
+ public void simple() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(missing("missing_tag").field("tag"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Missing missing = response.getAggregations().get("missing_tag");
+ assertThat(missing, notNullValue());
+ assertThat(missing.getName(), equalTo("missing_tag"));
+ assertThat(missing.getDocCount(), equalTo((long) numDocsMissing));
+ }
+
+ @Test
+ public void withSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "unmapped_idx")
+ .addAggregation(missing("missing_tag").field("tag")
+ .subAggregation(avg("avg_value").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+
+ Missing missing = response.getAggregations().get("missing_tag");
+ assertThat(missing, notNullValue());
+ assertThat(missing.getName(), equalTo("missing_tag"));
+ assertThat(missing.getDocCount(), equalTo((long) numDocsMissing + numDocsUnmapped));
+ assertThat(missing.getAggregations().asList().isEmpty(), is(false));
+
+ long sum = 0;
+ for (int i = 0; i < numDocsMissing; ++i) {
+ sum += i;
+ }
+ for (int i = 0; i < numDocsUnmapped; ++i) {
+ sum += i;
+ }
+ Avg avgValue = missing.getAggregations().get("avg_value");
+ assertThat(avgValue, notNullValue());
+ assertThat(avgValue.getName(), equalTo("avg_value"));
+ assertThat(avgValue.getValue(), equalTo((double) sum / (numDocsMissing + numDocsUnmapped)));
+ }
+
+ @Test
+ public void withInheritedSubMissing() throws Exception {
+
+ SearchResponse response = client().prepareSearch()
+ .addAggregation(missing("top_missing").field("tag")
+ .subAggregation(missing("sub_missing")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Missing topMissing = response.getAggregations().get("top_missing");
+ assertThat(topMissing, notNullValue());
+ assertThat(topMissing.getName(), equalTo("top_missing"));
+ assertThat(topMissing.getDocCount(), equalTo((long) numDocsMissing + numDocsUnmapped));
+ assertThat(topMissing.getAggregations().asList().isEmpty(), is(false));
+
+ Missing subMissing = topMissing.getAggregations().get("sub_missing");
+ assertThat(subMissing, notNullValue());
+ assertThat(subMissing.getName(), equalTo("sub_missing"));
+ assertThat(subMissing.getDocCount(), equalTo((long) numDocsMissing + numDocsUnmapped));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(missing("missing")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Missing missing = bucket.getAggregations().get("missing");
+ assertThat(missing, Matchers.notNullValue());
+ assertThat(missing.getName(), equalTo("missing"));
+ assertThat(missing.getDocCount(), is(0l));
+ }
+
+
+}
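
The expected average in withSubAggregation follows directly from how init() indexes data: the missing bucket collects the first numDocsMissing documents of "idx" (values 0..numDocsMissing-1) plus all numDocsUnmapped documents of "unmapped_idx" (values 0..numDocsUnmapped-1). The same expectation in closed form, using 0 + 1 + ... + (n-1) = n(n-1)/2 (a plain-Java sketch, no Elasticsearch types):

    final class MissingAvgModel {
        // Closed-form version of the summing loops in withSubAggregation().
        static double expectedAvg(int numDocsMissing, int numDocsUnmapped) {
            long sum = (long) numDocsMissing * (numDocsMissing - 1) / 2
                    + (long) numDocsUnmapped * (numDocsUnmapped - 1) / 2;
            return (double) sum / (numDocsMissing + numDocsUnmapped);
        }
    }
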
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingTests.java
new file mode 100644
index 0000000..789b814
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingTests.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.Comparators;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+public class NaNSortingTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ private enum SubAggregation {
+ AVG("avg") {
+ @Override
+ public MetricsAggregationBuilder<?> builder() {
+ return avg(name).field("numeric_field");
+ }
+ @Override
+ public double getValue(Aggregation aggregation) {
+ return ((Avg) aggregation).getValue();
+ }
+ },
+ VARIANCE("variance") {
+ @Override
+ public MetricsAggregationBuilder<?> builder() {
+ return extendedStats(name).field("numeric_field");
+ }
+ @Override
+ public String sortKey() {
+ return name + ".variance";
+ }
+ @Override
+ public double getValue(Aggregation aggregation) {
+ return ((ExtendedStats) aggregation).getVariance();
+ }
+ },
+ STD_DEVIATION("std_deviation") {
+ @Override
+ public MetricsAggregationBuilder<?> builder() {
+ return extendedStats(name).field("numeric_field");
+ }
+ @Override
+ public String sortKey() {
+ return name + ".std_deviation";
+ }
+ @Override
+ public double getValue(Aggregation aggregation) {
+ return ((ExtendedStats) aggregation).getStdDeviation();
+ }
+ };
+
+ SubAggregation(String name) {
+ this.name = name;
+ }
+
+ public final String name;
+
+ public abstract MetricsAggregationBuilder<?> builder();
+
+ public String sortKey() {
+ return name;
+ }
+
+ public abstract double getValue(Aggregation aggregation);
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ final int numDocs = randomIntBetween(2, 10);
+ for (int i = 0; i < numDocs; ++i) {
+ final long value = randomInt(5);
+ XContentBuilder source = jsonBuilder().startObject().field("long_value", value).field("double_value", value + 0.05).field("string_value", "str_" + value);
+ if (randomBoolean()) {
+ source.field("numeric_value", randomDouble());
+ }
+ client().prepareIndex("idx", "type").setSource(source.endObject()).execute().actionGet();
+ }
+ refresh();
+ ensureSearchable();
+ }
+
+ private void assertCorrectlySorted(Terms terms, boolean asc, SubAggregation agg) {
+ assertThat(terms, notNullValue());
+ double previousValue = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ Aggregation sub = bucket.getAggregations().get(agg.name);
+ double value = agg.getValue(sub);
+ assertTrue(Comparators.compareDiscardNaN(previousValue, value, asc) <= 0);
+ previousValue = value;
+ }
+ }
+
+ private void assertCorrectlySorted(Histogram histo, boolean asc, SubAggregation agg) {
+ assertThat(histo, notNullValue());
+ double previousValue = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ Aggregation sub = bucket.getAggregations().get(agg.name);
+ double value = agg.getValue(sub);
+ assertTrue(Comparators.compareDiscardNaN(previousValue, value, asc) <= 0);
+ previousValue = value;
+ }
+ }
+
+ private void testTerms(String fieldName) {
+ final boolean asc = randomBoolean();
+ SubAggregation agg = randomFrom(SubAggregation.values());
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(terms("terms").field(fieldName).subAggregation(agg.builder()).order(Terms.Order.aggregation(agg.sortKey(), asc)))
+ .execute().actionGet();
+
+ final Terms terms = response.getAggregations().get("terms");
+ assertCorrectlySorted(terms, asc, agg);
+ }
+
+ @Test
+ public void stringTerms() {
+ testTerms("string_value");
+ }
+
+ @Test
+ public void longTerms() {
+ testTerms("long_value");
+ }
+
+ @Test
+ public void doubleTerms() {
+ testTerms("double_value");
+ }
+
+ @Test
+ public void longHistogram() {
+ final boolean asc = randomBoolean();
+ SubAggregation agg = randomFrom(SubAggregation.values());
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo")
+ .field("long_value").interval(randomIntBetween(1, 2)).subAggregation(agg.builder()).order(Histogram.Order.aggregation(agg.sortKey(), asc)))
+ .execute().actionGet();
+
+ final Histogram histo = response.getAggregations().get("histo");
+ assertCorrectlySorted(histo, asc, agg);
+ }
+
+}
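
The assertCorrectlySorted loops above only hold if buckets whose sub-aggregation saw no "numeric_field" values (and therefore report NaN) stay out of the ordering of the defined values. One plausible model of such a NaN-discarding comparison is sketched below; this is an assumption about the semantics, not a copy of org.elasticsearch.common.util.Comparators, whose tie-breaking may differ:

    final class NaNComparator {
        // Assumed semantics: defined values compare in the requested direction,
        // NaN sorts after every defined value, and two NaNs compare as equal.
        static int compareDiscardNaN(double u, double v, boolean asc) {
            if (Double.isNaN(u)) {
                return Double.isNaN(v) ? 0 : 1;
            }
            if (Double.isNaN(v)) {
                return -1;
            }
            return asc ? Double.compare(u, v) : Double.compare(v, u);
        }
    }
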
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java
new file mode 100644
index 0000000..7b88776
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java
@@ -0,0 +1,345 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.nested.Nested;
+import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class NestedTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ int numParents;
+ int[] numChildren;
+
+ @Before
+ public void init() throws Exception {
+
+ prepareCreate("idx")
+ .addMapping("type", "nested", "type=nested")
+ .setSettings(indexSettings())
+ .execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+
+ numParents = randomIntBetween(3, 10);
+ numChildren = new int[numParents];
+ int totalChildren = 0;
+ for (int i = 0; i < numParents; ++i) {
+ if (i == numParents - 1 && totalChildren == 0) {
+ // we need at least one child overall
+ numChildren[i] = randomIntBetween(1, 5);
+ } else {
+ numChildren[i] = randomInt(5);
+ }
+ totalChildren += numChildren[i];
+ }
+ assertTrue(totalChildren > 0);
+
+ for (int i = 0; i < numParents; i++) {
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .field("value", i + 1)
+ .startArray("nested");
+ for (int j = 0; j < numChildren[i]; ++j) {
+ source = source.startObject().field("value", i + 1 + j).endObject();
+ }
+ source = source.endArray().endObject();
+ builders.add(client().prepareIndex("idx", "type", ""+i+1).setSource(source));
+ }
+
+ prepareCreate("idx_nested_nested_aggs")
+ .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("nested2")
+ .field("type", "nested")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject()).get();
+
+ builders.add(
+ client().prepareIndex("idx_nested_nested_aggs", "type", "1")
+ .setSource(jsonBuilder().startObject()
+ .startArray("nested1")
+ .startObject()
+ .field("a", "a")
+ .startArray("nested2")
+ .startObject()
+ .field("b", 2)
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("a", "b")
+ .startArray("nested2")
+ .startObject()
+ .field("b", 2)
+ .endObject()
+ .endArray()
+ .endObject()
+ .endArray()
+ .endObject())
+ );
+
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @Test
+ public void simple() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(nested("nested").path("nested")
+ .subAggregation(stats("nested_value_stats").field("nested.value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ double min = Double.POSITIVE_INFINITY;
+ double max = Double.NEGATIVE_INFINITY;
+ long sum = 0;
+ long count = 0;
+ for (int i = 0; i < numParents; ++i) {
+ for (int j = 0; j < numChildren[i]; ++j) {
+ final long value = i + 1 + j;
+ min = Math.min(min, value);
+ max = Math.max(max, value);
+ sum += value;
+ ++count;
+ }
+ }
+
+ Nested nested = response.getAggregations().get("nested");
+ assertThat(nested, notNullValue());
+ assertThat(nested.getName(), equalTo("nested"));
+ assertThat(nested.getDocCount(), equalTo(count));
+ assertThat(nested.getAggregations().asList().isEmpty(), is(false));
+
+ Stats stats = nested.getAggregations().get("nested_value_stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMin(), equalTo(min));
+ assertThat(stats.getMax(), equalTo(max));
+ assertThat(stats.getCount(), equalTo(count));
+ assertThat(stats.getSum(), equalTo((double) sum));
+ assertThat(stats.getAvg(), equalTo((double) sum / count));
+ }
+
+ @Test
+ public void onNonNestedField() throws Exception {
+ try {
+ client().prepareSearch("idx")
+ .addAggregation(nested("nested").path("value")
+ .subAggregation(stats("nested_value_stats").field("nested.value")))
+ .execute().actionGet();
+
+ fail("expected execution to fail - an attempt to nested facet on non-nested field/path");
+
+ } catch (ElasticsearchException ese) {
+ // expected: the "value" field is not mapped as a nested object
+ }
+ }
+
+ @Test
+ public void nestedWithSubTermsAgg() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(nested("nested").path("nested")
+ .subAggregation(terms("values").field("nested.value").size(100)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ long docCount = 0;
+ long[] counts = new long[numParents + 6];
+ for (int i = 0; i < numParents; ++i) {
+ for (int j = 0; j < numChildren[i]; ++j) {
+ final int value = i + 1 + j;
+ ++counts[value];
+ ++docCount;
+ }
+ }
+ int uniqueValues = 0;
+ for (long count : counts) {
+ if (count > 0) {
+ ++uniqueValues;
+ }
+ }
+
+ Nested nested = response.getAggregations().get("nested");
+ assertThat(nested, notNullValue());
+ assertThat(nested.getName(), equalTo("nested"));
+ assertThat(nested.getDocCount(), equalTo(docCount));
+ assertThat(nested.getAggregations().asList().isEmpty(), is(false));
+
+ LongTerms values = nested.getAggregations().get("values");
+ assertThat(values, notNullValue());
+ assertThat(values.getName(), equalTo("values"));
+ assertThat(values.getBuckets(), notNullValue());
+ assertThat(values.getBuckets().size(), equalTo(uniqueValues));
+ for (int i = 0; i < counts.length; ++i) {
+ final String key = Long.toString(i);
+ if (counts[i] == 0) {
+ assertNull(values.getBucketByKey(key));
+ } else {
+ Bucket bucket = values.getBucketByKey(key);
+ assertNotNull(bucket);
+ assertEquals(counts[i], bucket.getDocCount());
+ }
+ }
+ }
+
+ @Test
+ public void nestedAsSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(terms("top_values").field("value").size(100)
+ .subAggregation(nested("nested").path("nested")
+ .subAggregation(max("max_value").field("nested.value"))))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ LongTerms values = response.getAggregations().get("top_values");
+ assertThat(values, notNullValue());
+ assertThat(values.getName(), equalTo("top_values"));
+ assertThat(values.getBuckets(), notNullValue());
+ assertThat(values.getBuckets().size(), equalTo(numParents));
+
+ for (int i = 0; i < numParents; i++) {
+ String topValue = "" + (i + 1);
+ assertThat(values.getBucketByKey(topValue), notNullValue());
+ Nested nested = values.getBucketByKey(topValue).getAggregations().get("nested");
+ assertThat(nested, notNullValue());
+ Max max = nested.getAggregations().get("max_value");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo(numChildren[i] == 0 ? Double.NEGATIVE_INFINITY : (double) i + numChildren[i]));
+ }
+ }
+
+ @Test
+ public void nestNestedAggs() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_nested_nested_aggs")
+ .addAggregation(nested("level1").path("nested1")
+ .subAggregation(terms("a").field("nested1.a")
+ .subAggregation(nested("level2").path("nested1.nested2")
+ .subAggregation(sum("sum").field("nested1.nested2.b")))))
+ .get();
+ assertSearchResponse(response);
+
+
+ Nested level1 = response.getAggregations().get("level1");
+ assertThat(level1, notNullValue());
+ assertThat(level1.getName(), equalTo("level1"));
+ assertThat(level1.getDocCount(), equalTo(2l));
+
+ StringTerms a = level1.getAggregations().get("a");
+ Terms.Bucket bBucket = a.getBucketByKey("a");
+ assertThat(bBucket.getDocCount(), equalTo(1l));
+
+ Nested level2 = bBucket.getAggregations().get("level2");
+ assertThat(level2.getDocCount(), equalTo(1l));
+ Sum sum = level2.getAggregations().get("sum");
+ assertThat(sum.getValue(), equalTo(2d));
+
+ a = level1.getAggregations().get("a");
+ bBucket = a.getBucketByKey("b");
+ assertThat(bBucket.getDocCount(), equalTo(1l));
+
+ level2 = bBucket.getAggregations().get("level2");
+ assertThat(level2.getDocCount(), equalTo(1l));
+ sum = level2.getAggregations().get("sum");
+ assertThat(sum.getValue(), equalTo(2d));
+ }
+
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer", "nested", "type=nested").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .startArray("nested")
+ .startObject().field("value", i + 1).endObject()
+ .startObject().field("value", i + 2).endObject()
+ .startObject().field("value", i + 3).endObject()
+ .startObject().field("value", i + 4).endObject()
+ .startObject().field("value", i + 5).endObject()
+ .endArray()
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(nested("nested").path("nested")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Nested nested = bucket.getAggregations().get("nested");
+ assertThat(nested, Matchers.notNullValue());
+ assertThat(nested.getName(), equalTo("nested"));
+ assertThat(nested.getDocCount(), is(0l));
+ }
+}
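
nestedWithSubTermsAgg sizes its counts array at numParents + 6 because parent i (0-based) holds child values (i + 1) + j for j < numChildren[i] <= 5, so the largest possible value is (numParents - 1) + 1 + 4 = numParents + 4. The same histogram on its own, as a minimal plain-Java sketch mirroring the loop in the test:

    final class NestedValueModel {
        // Parent i contributes child values i+1, i+2, ..., i+numChildren[i].
        static long[] childValueCounts(int numParents, int[] numChildren) {
            long[] counts = new long[numParents + 6]; // max value is numParents + 4
            for (int i = 0; i < numParents; ++i) {
                for (int j = 0; j < numChildren[i]; ++j) {
                    counts[i + 1 + j]++;
                }
            }
            return counts;
        }
    }
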
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java
new file mode 100644
index 0000000..3b62521
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java
@@ -0,0 +1,947 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class RangeTests extends ElasticsearchIntegrationTest {
+
+ private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "l_values";
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ int numDocs;
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ numDocs = randomIntBetween(10, 20);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i+1)
+ .startArray(MULTI_VALUED_FIELD_NAME).value(i+1).value(i+2).endArray()
+ .endObject());
+ }
+ indexRandom(true, builders);
+ createIndex("idx_unmapped");
+ ensureSearchable();
+ }
+
+ @Test
+ public void rangeAsSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(terms("terms").field(MULTI_VALUED_FIELD_NAME).size(100).subAggregation(
+ range("range").field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getBuckets().size(), equalTo(numDocs + 1));
+ for (int i = 1; i < numDocs + 2; ++i) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ final long docCount = i == 1 || i == numDocs + 1 ? 1 : 2;
+ assertThat(bucket.getDocCount(), equalTo(docCount));
+ Range range = bucket.getAggregations().get("range");
+ Range.Bucket rangeBucket = range.getBucketByKey("*-3.0");
+ assertThat(rangeBucket, notNullValue());
+ if (i == 1 || i == 3) {
+ assertThat(rangeBucket.getDocCount(), equalTo(1L));
+ } else if (i == 2) {
+ assertThat(rangeBucket.getDocCount(), equalTo(2L));
+ } else {
+ assertThat(rangeBucket.getDocCount(), equalTo(0L));
+ }
+ rangeBucket = range.getBucketByKey("3.0-6.0");
+ assertThat(rangeBucket, notNullValue());
+ if (i == 3 || i == 6) {
+ assertThat(rangeBucket.getDocCount(), equalTo(1L));
+ } else if (i == 4 || i == 5) {
+ assertThat(rangeBucket.getDocCount(), equalTo(2L));
+ } else {
+ assertThat(rangeBucket.getDocCount(), equalTo(0L));
+ }
+ rangeBucket = range.getBucketByKey("6.0-*");
+ assertThat(rangeBucket, notNullValue());
+ if (i == 6 || i == numDocs + 1) {
+ assertThat(rangeBucket.getDocCount(), equalTo(1L));
+ } else if (i < 6) {
+ assertThat(rangeBucket.getDocCount(), equalTo(0L));
+ } else {
+ assertThat(rangeBucket.getDocCount(), equalTo(2L));
+ }
+ }
+ }
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+ }
+
+ @Test
+ public void singleValueField_WithCustomKey() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo("r1", 3)
+ .addRange("r2", 3, 6)
+ .addUnboundedFrom("r3", 6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("r1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("r2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r2"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = range.getBucketByKey("r3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r3"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(3.0)); // 1 + 2
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(12.0)); // 3 + 4 + 5
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long total = 0;
+ for (int i = 5; i < numDocs; ++i) {
+ total += i + 1;
+ }
+ assertThat(sum.getValue(), equalTo((double) total));
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6)
+ .subAggregation(avg("avg")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Avg avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(1.5)); // (1 + 2) / 2
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(4.0)); // (3 + 4 + 5) / 3
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ long total = 0;
+ for (int i = 5; i < numDocs; ++i) {
+ total += i + 1;
+ }
+ assertThat(avg.getValue(), equalTo((double) total / (numDocs - 5))); // (6 + 7 + 8 + 9 + 10) / 5
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .script("_value + 1")
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(1l)); // 2
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l)); // 3, 4, 5
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ /*
+ [1, 2]
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+ [6, 7]
+ [7, 8]
+ [8, 9]
+ [9, 10]
+ [10, 11]
+ */
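+ // Expected doc counts below: "*-3.0" matches [1, 2] and [2, 3] (some value < 3),
+ // "3.0-6.0" matches [2, 3], [3, 4], [4, 5] and [5, 6] (some value in [3, 6)),
+ // and "6.0-*" matches the remaining numDocs - 4 documents.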
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ /*
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+ [6, 7]
+ [7, 8]
+ [8, 9]
+ [9, 10]
+ [10, 11]
+ [11, 12]
+ */
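+ // With "_value + 1" applied: r1 (*-3.0) holds only the value 2 -> 1 doc;
+ // r2 (3.0-6.0) holds 3, 3, 4, 4, 5, 5 -> 4 docs; r3 (6.0-*) holds the rest -> numDocs - 3 docs.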
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value + 1")
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 3l));
+ }
+
+ /*
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+ [6, 7]
+ [7, 8]
+ [8, 9]
+ [9, 10]
+ [10, 11]
+ [11, 12]
+
+ r1: 2
+ r2: 3, 3, 4, 4, 5, 5
+ r3: 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12
+ */
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value + 1")
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
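+ // a doc lands in a bucket if any of its scripted values falls in the range, and the inherited
+ // sum then adds all of the doc's values, including the out-of-range ones (e.g. r1 holds only
+ // doc 0, yet its sum is 2 + 3)
+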
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo(2d+3d));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) (2+3+3+4+4+5+5+6)));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 3L));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
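+ // docs with i >= 3 have a scripted value >= 6 (the script shifts [i+1, i+2] to [i+2, i+3])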
+ long total = 0;
+ for (int i = 3; i < numDocs; ++i) {
+ total += ((i + 1) + 1) + ((i + 1) + 2);
+ }
+ assertThat(sum.getValue(), equalTo((double) total));
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6)
+ .subAggregation(avg("avg")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Avg avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(1.5)); // (1 + 2) / 2
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(4.0)); // (3 + 4 + 5) / 3
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
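+ // docs with value >= 6 fall into this bucket, i.e. i >= 5 since doc i holds the value i + 1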
+ long total = 0;
+ for (int i = 5; i < numDocs; ++i) {
+ total += i + 1;
+ }
+ assertThat(avg.getValue(), equalTo((double) total / (numDocs - 5))); // e.g. for numDocs = 10: (6 + 7 + 8 + 9 + 10) / 5
+ }
+
+ @Test
+ public void emptyRange() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .addUnboundedTo(-1)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(2));
+
+ Range.Bucket bucket = range.getBucketByKey("*--1.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*--1.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(-1.0));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("1000.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(1000d));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ /*
+ [1, 2]
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+ [6, 7]
+ [7, 8]
+ [8, 9]
+ [9, 10]
+ [10, 11]
+
+ r1: 1, 2, 2
+ r2: 3, 3, 4, 4, 5, 5
+ r3: 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11
+ */
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .addUnboundedTo("r1", 3)
+ .addRange("r2", 3, 6)
+ .addUnboundedFrom("r3", 6)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("r1");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r1"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) (1+2+2+3)));
+
+ bucket = range.getBucketByKey("r2");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r2"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) (2+3+3+4+4+5+5+6)));
+
+ bucket = range.getBucketByKey("r3");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("r3"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
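+ // docs with i >= 4 hold a value >= 6 (doc i holds [i+1, i+2]), and the inherited sum adds
+ // both values of each such doc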
+ long total = 0;
+ for (int i = 4; i < numDocs; ++i) {
+ total += (i + 1) + (i + 2);
+ }
+ assertThat(sum.getValue(), equalTo((double) total));
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ client().admin().cluster().prepareHealth("idx_unmapped").setWaitForYellowStatus().execute().actionGet();
+
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = range.getBucketByKey("*-3.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-3.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = range.getBucketByKey("6.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("6.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ }
+
+ @Test
+ public void overlappingRanges() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .addUnboundedTo(5)
+ .addRange(3, 6)
+ .addRange(4, 5)
+ .addUnboundedFrom(4))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(4));
+
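+ // the ranges overlap, so a doc is counted in every bucket that contains at least one of its
+ // values (doc i holds [i+1, i+2])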
+ Range.Bucket bucket = range.getBucketByKey("*-5.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("*-5.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(bucket.getTo().doubleValue(), equalTo(5.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ bucket = range.getBucketByKey("3.0-6.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(3.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ bucket = range.getBucketByKey("4.0-5.0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("4.0-5.0"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(4.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(5.0));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = range.getBucketByKey("4.0-*");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKey(), equalTo("4.0-*"));
+ assertThat(bucket.getFrom().doubleValue(), equalTo(4.0));
+ assertThat(bucket.getTo().doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i * 2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
+ .subAggregation(range("range").addRange("0-2", 0.0, 2.0)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Range range = bucket.getAggregations().get("range");
+ List<Range.Bucket> buckets = new ArrayList<Range.Bucket>(range.getBuckets());
+ assertThat(range, Matchers.notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(buckets.size(), is(1));
+ assertThat(buckets.get(0).getKey(), equalTo("0-2"));
+ assertThat(buckets.get(0).getFrom().doubleValue(), equalTo(0.0));
+ assertThat(buckets.get(0).getTo().doubleValue(), equalTo(2.0));
+ assertThat(buckets.get(0).getDocCount(), equalTo(0l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceTests.java
new file mode 100644
index 0000000..dca4052
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceTests.java
@@ -0,0 +1,324 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.missing.Missing;
+import org.elasticsearch.search.aggregations.bucket.nested.Nested;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.elasticsearch.search.aggregations.bucket.range.date.DateRange;
+import org.elasticsearch.search.aggregations.bucket.range.ipv4.IPv4Range;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests making sure that the reduce is propagated to all aggregations in the hierarchy when executing on a single shard.
+ * These tests are based on the date histogram in combination with min_doc_count=0. In order for the date histogram to
+ * compute empty buckets, its {@code reduce()} method must be called. So by adding the date histogram under other buckets,
+ * we can make sure that the reduce is properly propagated by checking that empty buckets were created.
+ */
+public class ShardReduceTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", randomBoolean() ? 1 : randomIntBetween(2, 10))
+ .put("index.number_of_replicas", randomIntBetween(0, 1))
+ .build();
+ }
+
+ private IndexRequestBuilder indexDoc(String date, int value) throws Exception {
+ return client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("value", value)
+ .field("ip", "10.0.0." + value)
+ .field("location", GeoHashUtils.encode(52, 5, 12))
+ .field("date", date)
+ .field("term-l", 1)
+ .field("term-d", 1.5)
+ .field("term-s", "term")
+ .startObject("nested")
+ .field("date", date)
+ .endObject()
+ .endObject());
+ }
+
+ @Before
+ public void init() throws Exception {
+ prepareCreate("idx")
+ .addMapping("type", "nested", "type=nested", "ip", "type=ip", "location", "type=geo_point")
+ .setSettings(indexSettings())
+ .execute().actionGet();
+
+ indexRandom(true,
+ indexDoc("2014-01-01", 1),
+ indexDoc("2014-01-02", 2),
+ indexDoc("2014-01-04", 3));
+ ensureSearchable();
+ }
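+
+ // The three docs fall on 2014-01-01, 2014-01-02 and 2014-01-04, so a daily date histogram with
+ // minDocCount(0) spans four buckets. The empty 2014-01-03 bucket only materializes if reduce()
+ // actually ran on the histogram, which is what every getBuckets().size() == 4 assertion below
+ // verifies.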
+
+ @Test
+ public void testGlobal() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(global("global")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Global global = response.getAggregations().get("global");
+ DateHistogram histo = global.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testFilter() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(filter("filter").filter(FilterBuilders.matchAllFilter())
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Filter filter = response.getAggregations().get("filter");
+ DateHistogram histo = filter.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testMissing() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(missing("missing").field("foobar")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Missing missing = response.getAggregations().get("missing");
+ DateHistogram histo = missing.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testGlobalWithFilterWithMissing() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(global("global")
+ .subAggregation(filter("filter").filter(FilterBuilders.matchAllFilter())
+ .subAggregation(missing("missing").field("foobar")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Global global = response.getAggregations().get("global");
+ Filter filter = global.getAggregations().get("filter");
+ Missing missing = filter.getAggregations().get("missing");
+ DateHistogram histo = missing.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testNested() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(nested("nested").path("nested")
+ .subAggregation(dateHistogram("histo").field("nested.date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Nested nested = response.getAggregations().get("nested");
+ DateHistogram histo = nested.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testStringTerms() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(terms("terms").field("term-s")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ DateHistogram histo = terms.getBucketByKey("term").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testLongTerms() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(terms("terms").field("term-l")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ DateHistogram histo = terms.getBucketByKey("1").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testDoubleTerms() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(terms("terms").field("term-d")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ DateHistogram histo = terms.getBucketByKey("1.5").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testRange() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(range("range").field("value").addRange("r1", 0, 10)
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ DateHistogram histo = range.getBucketByKey("r1").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testDateRange() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(dateRange("range").field("date").addRange("r1", "2014-01-01", "2014-01-10")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ DateRange range = response.getAggregations().get("range");
+ DateHistogram histo = range.getBucketByKey("r1").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testIpRange() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(ipRange("range").field("ip").addRange("r1", "10.0.0.1", "10.0.0.10")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ IPv4Range range = response.getAggregations().get("range");
+ DateHistogram histo = range.getBucketByKey("r1").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testHistogram() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(histogram("topHisto").field("value").interval(5)
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram topHisto = response.getAggregations().get("topHisto");
+ DateHistogram histo = topHisto.getBucketByKey(0).getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testDateHistogram() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(dateHistogram("topHisto").field("date").interval(DateHistogram.Interval.MONTH)
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ DateHistogram topHisto = response.getAggregations().get("topHisto");
+ DateHistogram histo = topHisto.getBuckets().iterator().next().getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ }
+
+ @Test
+ public void testGeoHashGrid() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(geohashGrid("grid").field("location")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogram.Interval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ GeoHashGrid grid = response.getAggregations().get("grid");
+ DateHistogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java
new file mode 100644
index 0000000..7f97c47
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java
@@ -0,0 +1,361 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST)
+public class ShardSizeTermsTests extends ElasticsearchIntegrationTest {
+
+ /**
+ * To properly test the effect/functionality of shard_size, we need to force having 2 shards and also
+ * control the routing such that certain documents will end up on each shard. Using the "djb" routing hash + ignoring the
+ * doc type when hashing will ensure that docs with routing value "1" will end up in a different shard than docs with
+ * routing value "2".
+ */
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ .put("cluster.routing.operation.hash.type", "djb")
+ .put("cluster.routing.operation.use_type", "false")
+ .build();
+ }
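+
+ // How shard_size changes the reduce (the table in indexData() spells out the data): each shard
+ // returns only its local top shard_size terms (here effectively defaulting to size), and the
+ // coordinating node sums those partial counts. With size=3 and no shard_size, shard 2 never
+ // reports "2" (its local top 3 is "3", "1", "4"), so "2" reduces to 4; with shardSize(5) both
+ // shards report "2" and the reduced count of 5 is exact.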
+
+ @Test
+ public void noShardSize_string() throws Exception {
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
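+ // shard 2's local top 3 is "3"-5, "1"-3, "4"-2, so its count for "2" never reaches the reduce
+ // phase and "2" comes back as 4 instead of the true 5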
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 4l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsText().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 5l) // <-- count is now fixed
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsText().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 5l)
+ .put("2", 4l)
+ .put("3", 3l) // <-- count is now fixed
+ .build();
+ for (Terms.Bucket bucket: buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKey())));
+ }
+ }
+
+ @Test
+ public void noShardSize_long() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 4l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l) // <-- count is now fixed
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 5l)
+ .put(2, 4l)
+ .put(3, 3l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void noShardSize_double() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 4l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l) // <-- count is now fixed
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 5l)
+ .put(2, 4l)
+ .put(3, 3l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ private void indexData() throws Exception {
+
+ /*
+
+
+ || || size = 3, shard_size = 5 || shard_size = size = 3 ||
+ ||==========||==================================================||===============================================||
+ || shard 1: || "1" - 5 | "2" - 4 | "3" - 3 | "4" - 2 | "5" - 1 || "1" - 5 | "3" - 3 | "2" - 4 ||
+ ||----------||--------------------------------------------------||-----------------------------------------------||
+ || shard 2: || "1" - 3 | "2" - 1 | "3" - 5 | "4" - 2 | "5" - 1 || "1" - 3 | "3" - 5 | "4" - 2 ||
+ ||----------||--------------------------------------------------||-----------------------------------------------||
+ || reduced: || "1" - 8 | "2" - 5 | "3" - 8 | "4" - 4 | "5" - 2 || ||
+ || || || "1" - 8, "3" - 8, "2" - 4 <= WRONG ||
+ || || "1" - 8 | "3" - 8 | "2" - 5 <= CORRECT || ||
+
+
+ */
+
+ List<IndexRequestBuilder> indexOps = new ArrayList<IndexRequestBuilder>();
+
+ indexDoc("1", "1", 5, indexOps);
+ indexDoc("1", "2", 4, indexOps);
+ indexDoc("1", "3", 3, indexOps);
+ indexDoc("1", "4", 2, indexOps);
+ indexDoc("1", "5", 1, indexOps);
+
+ // total docs in shard "1" = 15
+
+ indexDoc("2", "1", 3, indexOps);
+ indexDoc("2", "2", 1, indexOps);
+ indexDoc("2", "3", 5, indexOps);
+ indexDoc("2", "4", 2, indexOps);
+ indexDoc("2", "5", 1, indexOps);
+
+ // total docs in shard "2" = 12
+
+ indexRandom(true, indexOps);
+
+ long totalOnOne = client().prepareSearch("idx").setTypes("type").setRouting("1").setQuery(matchAllQuery()).execute().actionGet().getHits().getTotalHits();
+ assertThat(totalOnOne, is(15l));
+ long totalOnTwo = client().prepareSearch("idx").setTypes("type").setRouting("2").setQuery(matchAllQuery()).execute().actionGet().getHits().getTotalHits();
+ assertThat(totalOnTwo, is(12l));
+ ensureSearchable();
+ }
+
+ private void indexDoc(String shard, String key, int times, List<IndexRequestBuilder> indexOps) throws Exception {
+ for (int i = 0; i < times; i++) {
+ indexOps.add(client().prepareIndex("idx", "type").setRouting(shard).setCreate(true).setSource(jsonBuilder()
+ .startObject()
+ .field("key", key)
+ .endObject()));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsTests.java
new file mode 100644
index 0000000..3bcc3bf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsTests.java
@@ -0,0 +1,1024 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.google.common.base.Strings;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+public class StringTermsTests extends ElasticsearchIntegrationTest {
+
+ private static final String SINGLE_VALUED_FIELD_NAME = "s_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "s_values";
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
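+ // Randomly picks an execution hint so both terms aggregator implementations are exercised
+ // across runs ("map" aggregates on the term values themselves, "ordinals" on field data
+ // ordinals; null leaves the default in place).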
+ public static String randomExecutionHint() {
+ return randomFrom(Arrays.asList(null, TermsAggregatorFactory.EXECUTION_HINT_VALUE_MAP, TermsAggregatorFactory.EXECUTION_HINT_VALUE_ORDINALS));
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ IndexRequestBuilder[] lowCardBuilders = new IndexRequestBuilder[5]; // TODO randomize the size?
+ for (int i = 0; i < lowCardBuilders.length; i++) {
+ lowCardBuilders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, "val" + i)
+ .field("i", i)
+ .startArray(MULTI_VALUED_FIELD_NAME).value("val" + i).value("val" + (i + 1)).endArray()
+ .endObject());
+ }
+ indexRandom(true, lowCardBuilders);
+ IndexRequestBuilder[] highCardBuilders = new IndexRequestBuilder[100]; // TODO randomize the size?
+
+ for (int i = 0; i < highCardBuilders.length; i++) {
+ highCardBuilders[i] = client().prepareIndex("idx", "high_card_type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, "val" + Strings.padStart(i+"", 3, '0'))
+ .startArray(MULTI_VALUED_FIELD_NAME).value("val" + Strings.padStart(i+"", 3, '0')).value("val" + Strings.padStart((i+1)+"", 3, '0')).endArray()
+ .endObject());
+ }
+ indexRandom(true, highCardBuilders);
+ createIndex("idx_unmapped");
+ ensureSearchable();
+ }
+
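+ // randomly reads the bucket key through either accessor so both the String and the Text code
+ // paths get covered across runs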
+ private String key(Terms.Bucket bucket) {
+ return randomBoolean() ? bucket.getKey() : bucket.getKeyAsText().string();
+ }
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_WithRegexFiltering() throws Exception {
+
+ // include without exclude
+ // we should be left with: val000, val001, val002, val003, val004, val005, val006, val007, val008, val009
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME).include("val00.+"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(10));
+
+ for (int i = 0; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ // include and exclude
+ // we should be left with: val002, val003, val004, val005, val006, val007, val008, val009
+
+ response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME).include("val00.+").exclude("(val000|val001)"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(8));
+
+ for (int i = 2; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ // exclude without include
+ // we should be left with: val000, val001, val002, val003, val004, val005, val006, val007, val008, val009
+
+ response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME).exclude("val0[1-9]+.+"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(10));
+
+ for (int i = 0; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_WithRegexFiltering_WithFlags() throws Exception {
+
+ // include without exclude
+ // we should be left with: val000, val001, val002, val003, val004, val005, val006, val007, val008, val009
+ // with case insensitive flag on the include regex
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME).include("VAL00.+", Pattern.CASE_INSENSITIVE))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(10));
+
+ for (int i = 0; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ // include and exclude
+ // we should be left with: val002, val003, val004, val005, val006, val007, val008, val009
+ // with case-insensitive and comments flags combined on the exclude regex
+
+ response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME).include("val00.+").exclude("( val000 | VAL001 )#this is a comment", Pattern.CASE_INSENSITIVE | Pattern.COMMENTS))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(8));
+
+ for (int i = 2; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ // exclude without include
+ // we should be left with: val000, val001, val002, val003, val004, val005, val006, val007, val008, val009
+ // with a "no flag" flag
+
+ response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME).exclude("val0[1-9]+.+", 0))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(10));
+
+ for (int i = 0; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+
+ @Test
+ public void singleValueField_WithMaxSize() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .size(20)
+ .order(Terms.Order.term(true))) // we need to sort by terms because we're checking the first 20 values
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(20));
+
+ for (int i = 0; i < 20; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + Strings.padStart(i + "", 3, '0'));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + Strings.padStart(i+"", 3, '0')));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_OrderedByTermAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.term(true)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValueField_OrderedByTermDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.term(false)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(count("count").field(MULTI_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(2l));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .subAggregation(count("count")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .script("'foo_' + _value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("foo_val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("foo_val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript_NotUnique() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("_value.substring(0,3)"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(1));
+
+ Terms.Bucket bucket = terms.getBucketByKey("val");
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val"));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ }
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(MULTI_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("'foo_' + _value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("foo_val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("foo_val" + i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ /*
+
+ [foo_val0, foo_val1]
+ [foo_val1, foo_val2]
+ [foo_val2, foo_val3]
+ [foo_val3, foo_val4]
+ [foo_val4, foo_val5]
+
+
+ foo_val0 - doc_count: 1 - val_count: 2
+ foo_val1 - doc_count: 2 - val_count: 4
+ foo_val2 - doc_count: 2 - val_count: 4
+ foo_val3 - doc_count: 2 - val_count: 4
+ foo_val4 - doc_count: 2 - val_count: 4
+ foo_val5 - doc_count: 1 - val_count: 2
+
+ */
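+    // Each interior term (foo_val1..foo_val4) appears in two documents, and each matching
+    // document contributes both of its values to the inherited value_count, hence val_count 4.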
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script("'foo_' + _value")
+ .subAggregation(count("count")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("foo_val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("foo_val" + i));
+            if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(2l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat("term[" + key(bucket) + "]", valueCount.getValue(), equalTo(4l));
+ }
+ }
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void script_SingleValue_ExplicitSingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")
+ .subAggregation(count("count")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")
+ .subAggregation(count("count")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+            if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(2l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(4l));
+ }
+ }
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .size(randomInt(5))
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void stringTermsNestedIntoPerBucketAggregator() throws Exception {
+ // no execution hint so that the logic that decides whether or not to use ordinals is executed
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(filter("filter").filter(termFilter(MULTI_VALUED_FIELD_NAME, "val3")).subAggregation(terms("terms").field(MULTI_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ Filter filter = response.getAggregations().get("filter");
+
+ Terms terms = filter.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(3));
+
+ for (int i = 2; i <= 4; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(i == 3 ? 2L : 1L));
+ }
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
+ .subAggregation(terms("terms")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Terms terms = bucket.getAggregations().get("terms");
+ assertThat(terms, Matchers.notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().isEmpty(), is(true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field("i"))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMissingSubAggregation() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", true))
+ ).execute().actionGet();
+
+            fail("Expected search to fail when trying to sort terms aggregation by a sub-aggregation that doesn't exist");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByNonMetricsSubAggregation() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("filter", true))
+ .subAggregation(filter("filter").filter(FilterBuilders.termFilter("foo", "bar")))
+ ).execute().actionGet();
+
+            fail("Expected search to fail when trying to sort terms aggregation by a sub-aggregation which is not of a metrics type");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+    public void singleValuedField_OrderedByMultiValuedSubAggregation_WithUnknownMetric() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.foo", true))
+ .subAggregation(stats("stats").field("i"))
+ ).execute().actionGet();
+
+            fail("Expected search to fail when trying to sort terms aggregation by a multi-valued sub-aggregation " +
+                    "with an unknown specified metric to order by");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithoutMetric() throws Exception {
+
+ try {
+
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats", true))
+ .subAggregation(stats("stats").field("i"))
+ ).execute().actionGet();
+
+            fail("Expected search to fail when trying to sort terms aggregation by a multi-valued sub-aggregation " +
+                    "where the metric name is not specified");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field("i"))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ i--;
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field("i"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ i++;
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field("i"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ i--;
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueExtendedStatsAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .order(Terms.Order.aggregation("stats.sum_of_squares", asc))
+ .subAggregation(extendedStats("stats").field("i"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ ExtendedStats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ i++;
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/package-info.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/package-info.java
new file mode 100644
index 0000000..d6f1ac1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Bucket aggregations tests; the annotation below turns on TRACE logging for the
+ * search type action across every test in this package.
+ */
+@TestLogging("org.elasticsearch.action.search.type:TRACE")
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.test.junit.annotations.TestLogging; \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java
new file mode 100644
index 0000000..df69c9b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+
+/**
+ * Base class for the numeric metrics aggregation tests; sets up the shared "idx",
+ * "idx_unmapped" and "empty_bucket_idx" indices and declares the cases every numeric
+ * metric implementation must cover.
+ */
+public abstract class AbstractNumericTests extends ElasticsearchIntegrationTest {
+
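+    // Shard and replica counts are randomized so the distributed reduce phase is exercised;
+    // aggregation results must be identical however documents are partitioned across shards.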
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+
+ for (int i = 0; i < 10; i++) { // TODO randomize the size and the params in here?
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i+1)
+ .startArray("values").value(i+2).value(i+3).endArray()
+ .endObject()));
+ }
+ indexRandom(true, builders);
+
+        // Create an index to test the empty-buckets functionality. It works by indexing
+        // two docs, {value: 0} and {value: 2}, and then building a histogram agg with interval 1
+        // that computes empty buckets; the empty bucket is the one associated with key "1". Each
+        // test then has to check that this bucket exists with the appropriate sub-aggregations.
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ public abstract void testEmptyAggregation() throws Exception;
+
+ public abstract void testUnmapped() throws Exception;
+
+ public abstract void testSingleValuedField() throws Exception;
+
+ public abstract void testSingleValuedField_PartiallyUnmapped() throws Exception;
+
+ public abstract void testSingleValuedField_WithValueScript() throws Exception;
+
+ public abstract void testSingleValuedField_WithValueScript_WithParams() throws Exception;
+
+ public abstract void testMultiValuedField() throws Exception;
+
+ public abstract void testMultiValuedField_WithValueScript() throws Exception;
+
+ public abstract void testMultiValuedField_WithValueScript_WithParams() throws Exception;
+
+ public abstract void testScript_SingleValued() throws Exception;
+
+ public abstract void testScript_SingleValued_WithParams() throws Exception;
+
+ public abstract void testScript_ExplicitSingleValued_WithParams() throws Exception;
+
+ public abstract void testScript_MultiValued() throws Exception;
+
+ public abstract void testScript_ExplicitMultiValued() throws Exception;
+
+ public abstract void testScript_MultiValued_WithParams() throws Exception;
+
+
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java
new file mode 100644
index 0000000..41385b2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for the {@code avg} metrics aggregation.
+ */
+public class AvgTests extends AbstractNumericTests {
+
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(avg("avg")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, notNullValue());
+
+ Avg avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(Double.isNaN(avg.getValue()), is(true));
+ }
+
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo(Double.NaN));
+ }
+
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ }
+
+    @Test
+    @Override
+    public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ }
+
+
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12) / 20));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("values").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13) / 20));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("values").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13) / 20));
+ }
+
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ }
+
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ }
+
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ }
+
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("new double[] { doc['value'].value, doc['value'].value + 1 }"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11) / 20));
+ }
+
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("new double[] { doc['value'].value, doc['value'].value + 1 }"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11) / 20));
+ }
+
+ @Test
+ @TestLogging("search:TRACE")
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("new double[] { doc['value'].value, doc['value'].value + inc }").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11) / 20));
+ }
+
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java
new file mode 100644
index 0000000..437261b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java
@@ -0,0 +1,398 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for the {@code extended_stats} metrics aggregation.
+ */
+public class ExtendedStatsTests extends AbstractNumericTests {
+
+ private static double stdDev(int... vals) {
+ return Math.sqrt(variance(vals));
+ }
+
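+    // Population variance from running sums: variance = (sum(x^2) - (sum(x))^2 / n) / n.
+    // e.g. for 1..10: sum = 55, sumOfSqrs = 385, variance = (385 - 302.5) / 10 = 8.25,
+    // stdDev = sqrt(8.25) ~= 2.8723.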
+ private static double variance(int... vals) {
+ double sum = 0;
+ double sumOfSqrs = 0;
+ for (int val : vals) {
+ sum += val;
+ sumOfSqrs += val * val;
+ }
+ return (sumOfSqrs - ((sum * sum) / vals.length)) / vals.length;
+ }
+
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(extendedStats("stats")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, notNullValue());
+
+ ExtendedStats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getSumOfSquares(), equalTo(0.0));
+ assertThat(stats.getCount(), equalTo(0l));
+ assertThat(stats.getSum(), equalTo(0.0));
+ assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(Double.isNaN(stats.getStdDeviation()), is(true));
+ assertThat(Double.isNaN(stats.getAvg()), is(true));
+ }
+
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo(Double.NaN));
+ assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(stats.getSum(), equalTo(0.0));
+ assertThat(stats.getCount(), equalTo(0l));
+ assertThat(stats.getSumOfSquares(), equalTo(0.0));
+ assertThat(stats.getVariance(), equalTo(Double.NaN));
+ assertThat(stats.getStdDeviation(), equalTo(Double.NaN));
+ }
+
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100));
+        assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
+        assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
+ }
+
+ @Test
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100));
+        assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
+        assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121));
+        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("value").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121));
+        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ }
+
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121+9+16+25+36+49+64+81+100+121+144));
+        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
+        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("values").script("_value - 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(20l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100+4+9+16+25+36+49+64+81+100+121));
+        assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+        assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").field("values").script("_value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(20l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100+4+9+16+25+36+49+64+81+100+121));
+        assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+        assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ }
+
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100));
+        assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
+        assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
+ }
+
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121));
+        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ }
+
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121));
+        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
+ }
+
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121+9+16+25+36+49+64+81+100+121+144));
+        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
+        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
+ }
+
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121+9+16+25+36+49+64+81+100+121+144));
+        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
+        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
+    }
+
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
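+ // the script emits { value, value - dec } per doc, yielding 0..10 with each of 1..9 counted twice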
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").script("new double[] { doc['value'].value, doc['value'].value - dec }").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9) / 20));
+ assertThat(stats.getMin(), equalTo(0.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9));
+ assertThat(stats.getCount(), equalTo(20l));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100+0+1+4+9+16+25+36+49+64+81));
+ assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
+ }
+
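+ // fails loudly on unexpected shard failures, logging each one first to make diagnosis easier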
+ private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception {
+ ShardSearchFailure[] failures = response.getShardFailures();
+ if (failures.length != expectedFailures) {
+ for (ShardSearchFailure failure : failures) {
+ logger.error("Shard Failure: {}", failure.failure(), failure.toString());
+ }
+ fail("Unexpected shard failures!");
+ }
+ assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java
new file mode 100644
index 0000000..3cb4ff3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class MaxTests extends AbstractNumericTests {
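+ // The expected figures below assume the shared fixture from AbstractNumericTests (not shown
+ // in this patch): ten docs whose "value" field holds 1..10 and whose "values" array holds
+ // [i+2, i+3], i.e. 2..11 and 3..12 combined (the same shape ValueCountTests builds in init()).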
+
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, notNullValue());
+
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
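+ // an empty bucket leaves max at its neutral element, -Infinity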
+ assertThat(max.getValue(), equalTo(Double.NEGATIVE_INFINITY));
+ }
+
+ @Test
+ public void testUnmapped() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(Double.NEGATIVE_INFINITY));
+ }
+
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(10.0));
+ }
+
+ @Test
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(10.0));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(12.0));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("values").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(13.0));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("values").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(13.0));
+ }
+
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(10.0));
+ }
+
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(12.0));
+ }
+
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(12.0));
+ }
+
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("new double[] { doc['value'].value, doc['value'].value + inc }").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java
new file mode 100644
index 0000000..8a73d91
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.min.Min;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.min;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class MinTests extends AbstractNumericTests {
+
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(min("min")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, notNullValue());
+
+ Min min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
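+ // nothing was aggregated, so min stays at its neutral element, +Infinity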
+ assertThat(min.getValue(), equalTo(Double.POSITIVE_INFINITY));
+ }
+
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(Double.POSITIVE_INFINITY));
+ }
+
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Test
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value").script("_value - 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(0.0));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value").script("_value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(0.0));
+ }
+
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(2.0));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("values").script("_value - 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_Reverse() throws Exception {
+ // test what happens when values arrive in reverse order since the min aggregator is optimized to work on sorted values
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("values").script("_value * -1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(-12d));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("values").script("_value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['value'].value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(0.0));
+ }
+
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['value'].value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(0.0));
+ }
+
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(2.0));
+ }
+
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(2.0));
+ }
+
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
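+ // copies doc['values'] into a new array with "dec" subtracted from every entry, shifting the minimum from 2 to 1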
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("List values = doc['values'].values; double[] res = new double[values.length]; for (int i = 0; i < res.length; i++) { res[i] = values.get(i) - dec; }; return res;").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java
new file mode 100644
index 0000000..7437678
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java
@@ -0,0 +1,370 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class StatsTests extends AbstractNumericTests {
+
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(stats("stats")))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, notNullValue());
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
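+ // empty bucket: count and sum are 0, min/max sit at their infinities, and avg is 0/0 = NaN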
+ assertThat(stats.getCount(), equalTo(0l));
+ assertThat(stats.getSum(), equalTo(0.0));
+ assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(Double.isNaN(stats.getAvg()), is(true));
+ }
+
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo(Double.NaN));
+ assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(stats.getSum(), equalTo(0.0));
+ assertThat(stats.getCount(), equalTo(0l));
+ }
+
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("values"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("values").script("_value - 1"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("values").script("_value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ @TestLogging("search:TRACE")
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("new double[] { doc['value'].value, doc['value'].value - dec }").param("dec", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9) / 20));
+ assertThat(stats.getMin(), equalTo(0.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
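+ // same guard as in ExtendedStatsTests: log every shard failure before failing the test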
+ private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception {
+ ShardSearchFailure[] failures = response.getShardFailures();
+ if (failures.length != expectedFailures) {
+ for (ShardSearchFailure failure : failures) {
+ logger.error("Shard Failure: {}", failure.failure(), failure.toString());
+ }
+ fail("Unexpected shard failures!");
+ }
+ assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java
new file mode 100644
index 0000000..e7f59c4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java
@@ -0,0 +1,270 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class SumTests extends AbstractNumericTests {
+
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBucketByKey(1l);
+ assertThat(bucket, notNullValue());
+
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
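+ // sum's neutral element is 0.0, so an empty bucket reports exactly that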
+ assertThat(sum.getValue(), equalTo(0.0));
+ }
+
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo(0.0));
+ }
+
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ }
+
+ @Test
+ @Override
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ }
+
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value").script("_value + increment").param("increment", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ }
+
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ }
+
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ }
+
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ }
+
+ @Test
+ public void testScript_MultiValued() throws Exception {
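+ // every doc contributes { value, value + 1 }, so the sum covers 1..10 plus 2..11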
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("new double[] { doc['value'].value, doc['value'].value + 1 }"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11));
+ }
+
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("new double[] { doc['value'].value, doc['value'].value + 1 }"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11));
+ }
+
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("new double[] { doc['value'].value, doc['value'].value + inc }").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11));
+ }
+
+ @Test
+ public void testMultiValuedField() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("values").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("values").script("_value + increment").param("increment", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java
new file mode 100644
index 0000000..97bdce9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.count;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class ValueCountTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", between(0, 1))
+ .build();
+ }
+
+ @Before
+ public void init() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i+1)
+ .startArray("values").value(i+2).value(i+3).endArray()
+ .endObject())
+ .execute().actionGet();
+ }
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ ensureSearchable();
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(0l));
+ }
+
+ @Test
+ public void singleValuedField() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(10l));
+ }
+
+ @Test
+ public void singleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(10l));
+ }
+
+ @Test
+ public void multiValuedField() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(20l));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/package-info.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/package-info.java
new file mode 100644
index 0000000..c1c7e21
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Metrics aggregations tests
+ */
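+// package-wide TRACE logging on the search action type, handy when chasing shard failures in these tests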
+@TestLogging("org.elasticsearch.action.search.type:TRACE")
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.test.junit.annotations.TestLogging; \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/package-info.java b/src/test/java/org/elasticsearch/search/aggregations/package-info.java
new file mode 100644
index 0000000..c2a0b23
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Aggregations module
+ */
+@TestLogging("org.elasticsearch.action.search.type:TRACE")
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.test.junit.annotations.TestLogging; \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/aggregations/support/FieldDataSourceTests.java b/src/test/java/org/elasticsearch/search/aggregations/support/FieldDataSourceTests.java
new file mode 100644
index 0000000..b9c1954
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/support/FieldDataSourceTests.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.support;
+
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.fielddata.BytesValues;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+public class FieldDataSourceTests extends ElasticsearchTestCase {
+
+ private static BytesValues randomBytesValues() {
+ final boolean multiValued = randomBoolean();
+ return new BytesValues(multiValued) {
+ @Override
+ public int setDocument(int docId) {
+ return randomInt(multiValued ? 10 : 1);
+ }
+ @Override
+ public BytesRef nextValue() {
+ scratch.copyChars(randomAsciiOfLength(10));
+ return scratch;
+ }
+
+ };
+ }
+
+ private static SearchScript randomScript() {
+ return new SearchScript() {
+
+ @Override
+ public void setNextVar(String name, Object value) {
+ }
+
+ @Override
+ public Object run() {
+ return randomAsciiOfLength(5);
+ }
+
+ @Override
+ public Object unwrap(Object value) {
+ return value;
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext reader) {
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) {
+ }
+
+ @Override
+ public void setNextDocId(int doc) {
+ }
+
+ @Override
+ public void setNextSource(Map<String, Object> source) {
+ }
+
+ @Override
+ public void setNextScore(float score) {
+ }
+
+ @Override
+ public float runAsFloat() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long runAsLong() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public double runAsDouble() {
+ throw new UnsupportedOperationException();
+ }
+
+ };
+ }
+
+ private static void assertConsistent(BytesValues values) {
+ for (int i = 0; i < 10; ++i) {
+ final int valueCount = values.setDocument(i);
+ for (int j = 0; j < valueCount; ++j) {
+ final BytesRef term = values.nextValue();
+ assertEquals(term.hashCode(), values.currentValueHash());
+ assertTrue(term.bytesEquals(values.copyShared()));
+ }
+ }
+ }
+
+ @Test
+ public void bytesValuesWithScript() {
+ final BytesValues values = randomBytesValues();
+ FieldDataSource source = new FieldDataSource.Bytes() {
+
+ @Override
+ public BytesValues bytesValues() {
+ return values;
+ }
+
+ @Override
+ public MetaData metaData() {
+ throw new UnsupportedOperationException();
+ }
+
+ };
+ SearchScript script = randomScript();
+ assertConsistent(new FieldDataSource.WithScript.BytesValues(source, script));
+ }
+
+ @Test
+ public void sortedUniqueBytesValues() {
+ assertConsistent(new FieldDataSource.Bytes.SortedAndUnique.SortedUniqueBytesValues(randomBytesValues()));
+ }
+
+}
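For readers new to the fielddata API exercised above: assertConsistent encodes the BytesValues iteration contract, in which setDocument(docId) returns how many values the current document has and each nextValue() call yields the next one as a shared, reused buffer. A minimal consumer sketch under those assumptions (illustrative, not part of the test):

    import org.apache.lucene.util.BytesRef;
    import org.elasticsearch.index.fielddata.BytesValues;

    class BytesValuesContractSketch {
        // Mirrors the iteration contract that assertConsistent() checks above.
        static void consume(BytesValues values, int docId) {
            int valueCount = values.setDocument(docId);  // number of values for this doc
            for (int i = 0; i < valueCount; i++) {
                BytesRef shared = values.nextValue();    // reused buffer, only valid until the next call
                BytesRef copy = values.copyShared();     // take a private copy if the value must outlive the loop
            }
        }
    }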
diff --git a/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java b/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java
new file mode 100644
index 0000000..b14dd6f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.support;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.search.aggregations.support.bytes.ScriptBytesValues;
+import org.elasticsearch.search.aggregations.support.numeric.ScriptDoubleValues;
+import org.elasticsearch.search.aggregations.support.numeric.ScriptLongValues;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Map;
+
+public class ScriptValuesTests extends ElasticsearchTestCase {
+
+ private static class FakeSearchScript implements SearchScript {
+
+ private final Object[][] values;
+ int index;
+
+ FakeSearchScript(Object[][] values) {
+ this.values = values;
+ index = -1;
+ }
+
+ @Override
+ public void setNextVar(String name, Object value) {
+ }
+
+ @Override
+ public Object run() {
+ // Script values are supposed to support null, single values, arrays and collections
+ final Object[] values = this.values[index];
+ if (values.length <= 1 && randomBoolean()) {
+ return values.length == 0 ? null : values[0];
+ }
+ return randomBoolean() ? values : Arrays.asList(values);
+ }
+
+ @Override
+ public Object unwrap(Object value) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setNextReader(AtomicReaderContext reader) {
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) {
+ }
+
+ @Override
+ public void setNextDocId(int doc) {
+ index = doc;
+ }
+
+ @Override
+ public void setNextSource(Map<String, Object> source) {
+ }
+
+ @Override
+ public void setNextScore(float score) {
+ }
+
+ @Override
+ public float runAsFloat() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long runAsLong() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public double runAsDouble() {
+ throw new UnsupportedOperationException();
+ }
+
+ }
+
+ @Test
+ public void longs() {
+ final Object[][] values = new Long[randomInt(10)][];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = new Long[randomInt(8)];
+ for (int j = 0; j < values[i].length; ++j) {
+ values[i][j] = randomLong();
+ }
+ }
+ FakeSearchScript script = new FakeSearchScript(values);
+ ScriptLongValues scriptValues = new ScriptLongValues(script);
+ for (int i = 0; i < values.length; ++i) {
+ assertEquals(values[i].length, scriptValues.setDocument(i));
+ for (int j = 0; j < values[i].length; ++j) {
+ assertEquals(values[i][j], scriptValues.nextValue());
+ }
+ }
+ }
+
+ @Test
+ public void doubles() {
+ final Object[][] values = new Double[randomInt(10)][];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = new Double[randomInt(8)];
+ for (int j = 0; j < values[i].length; ++j) {
+ values[i][j] = randomDouble();
+ }
+ }
+ FakeSearchScript script = new FakeSearchScript(values);
+ ScriptDoubleValues scriptValues = new ScriptDoubleValues(script);
+ for (int i = 0; i < values.length; ++i) {
+ assertEquals(values[i].length, scriptValues.setDocument(i));
+ for (int j = 0; j < values[i].length; ++j) {
+ assertEquals(values[i][j], scriptValues.nextValue());
+ }
+ }
+ }
+
+ @Test
+ public void bytes() {
+ final String[][] values = new String[randomInt(10)][];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = new String[randomInt(8)];
+ for (int j = 0; j < values[i].length; ++j) {
+ values[i][j] = RandomStrings.randomAsciiOfLength(getRandom(), 5);
+ }
+ }
+ FakeSearchScript script = new FakeSearchScript(values);
+ ScriptBytesValues scriptValues = new ScriptBytesValues(script);
+ for (int i = 0; i < values.length; ++i) {
+ assertEquals(values[i].length, scriptValues.setDocument(i));
+ for (int j = 0; j < values[i].length; ++j) {
+ assertEquals(new BytesRef(values[i][j]), scriptValues.nextValue());
+ }
+ }
+ }
+
+}
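The comment inside FakeSearchScript.run() is the crux of this test: script values must accept null, a single value, an array, or a collection, and the Script*Values classes have to normalize all four shapes. A hedged, standalone restatement of that contract in plain Java (illustrative, not the actual ScriptLongValues/ScriptDoubleValues/ScriptBytesValues implementation):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collection;
    import java.util.Collections;
    import java.util.List;

    public class ScriptReturnShapes {
        // Normalizes the four return shapes a script may produce into one list.
        static List<Object> normalize(Object scriptResult) {
            if (scriptResult == null) {
                return Collections.emptyList();          // null => no values
            } else if (scriptResult instanceof Object[]) {
                return Arrays.asList((Object[]) scriptResult);
            } else if (scriptResult instanceof Collection) {
                return new ArrayList<Object>((Collection<?>) scriptResult);
            } else {
                return Collections.singletonList(scriptResult); // single scalar value
            }
        }
    }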
diff --git a/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexTests.java b/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexTests.java
new file mode 100644
index 0000000..680a35a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexTests.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+
+/**
+ * This test verifies that a search with a single shard active (because we indexed into it) and the
+ * other shards possibly not active at all (because they haven't been allocated yet) still works.
+ */
+public class SearchWhileCreatingIndexTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ @Slow
+ public void testIndexCausesIndexCreation() throws Exception {
+ searchWhileCreatingIndex(-1, 1); // 1 replica in our default...
+ }
+
+ @Test
+ @Slow
+ public void testNoReplicas() throws Exception {
+ searchWhileCreatingIndex(10, 0);
+ }
+
+ @Test
+ @Slow
+ public void testOneReplica() throws Exception {
+ searchWhileCreatingIndex(10, 1);
+ }
+
+ @Test
+ @Slow
+ public void testTwoReplicas() throws Exception {
+ searchWhileCreatingIndex(10, 2);
+ }
+
+ private void searchWhileCreatingIndex(int numberOfShards, int numberOfReplicas) throws Exception {
+
+        // make sure we have enough nodes to guarantee the default QUORUM consistency.
+ // TODO: add a smarter choice based on actual consistency (when that is randomized)
+ int shardsNo = numberOfReplicas + 1;
+ int neededNodes = shardsNo <= 2 ? 1 : shardsNo / 2 + 1;
+ cluster().ensureAtLeastNumNodes(randomIntBetween(neededNodes, shardsNo));
+ for (int i = 0; i < 20; i++) {
+ logger.info("running iteration {}", i);
+ if (numberOfShards > 0) {
+ CreateIndexResponse createIndexResponse = prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", numberOfReplicas)).get();
+ assertThat(createIndexResponse.isAcknowledged(), equalTo(true));
+ }
+ client().prepareIndex("test", "type1", randomAsciiOfLength(5)).setSource("field", "test").execute().actionGet();
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().actionGet();
+ assertThat(refreshResponse.getSuccessfulShards(), greaterThanOrEqualTo(1)); // at least one shard should be successful when refreshing
+
+            // we want to make sure that while recovery happens, and a replica gets recovered, it's properly refreshed
+ ClusterHealthStatus status = ClusterHealthStatus.RED;
+ while (status != ClusterHealthStatus.GREEN) {
+                // first, verify that search on the primary works
+ SearchResponse searchResponse = client().prepareSearch("test").setPreference("_primary").setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+ assertHitCount(searchResponse, 1);
+                // now, let it go to the primary or a replica, but in a randomized, reproducible manner
+ String preference = randomAsciiOfLength(5);
+ Client client = client();
+ searchResponse = client.prepareSearch("test").setPreference(preference).setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+ if (searchResponse.getHits().getTotalHits() != 1) {
+ refresh();
+ SearchResponse searchResponseAfterRefresh = client.prepareSearch("test").setPreference(preference).setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+ logger.info("hits count mismatch on any shard search failed, post explicit refresh hits are {}", searchResponseAfterRefresh.getHits().getTotalHits());
+ ensureGreen();
+ SearchResponse searchResponseAfterGreen = client.prepareSearch("test").setPreference(preference).setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+ logger.info("hits count mismatch on any shard search failed, post explicit wait for green hits are {}", searchResponseAfterGreen.getHits().getTotalHits());
+ assertHitCount(searchResponse, 1);
+ }
+ assertHitCount(searchResponse, 1);
+ status = client().admin().cluster().prepareHealth("test").get().getStatus();
+ cluster().ensureAtLeastNumNodes(numberOfReplicas + 1);
+ }
+ cluster().wipeIndices("test");
+ }
+ }
+}
\ No newline at end of file
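The node-count arithmetic at the top of searchWhileCreatingIndex is worth spelling out: each shard has numberOfReplicas + 1 copies, and the default QUORUM write consistency needs a majority of those copies active. A standalone restatement of that calculation (a sketch that simply mirrors the test's own formula):

    public class QuorumMath {
        public static void main(String[] args) {
            for (int replicas = 0; replicas <= 2; replicas++) {
                int shardsNo = replicas + 1;                            // copies per shard
                int neededNodes = shardsNo <= 2 ? 1 : shardsNo / 2 + 1; // majority of copies, special-cased for <= 2
                System.out.println(replicas + " replica(s) -> at least " + neededNodes + " node(s)");
            }
        }
    }

    // prints: 0 replica(s) -> at least 1 node(s)
    //         1 replica(s) -> at least 1 node(s)
    //         2 replica(s) -> at least 2 node(s)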
diff --git a/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java b/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java
new file mode 100644
index 0000000..4a700e9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+
+public class SearchWhileRelocatingTests extends ElasticsearchIntegrationTest {
+
+ @LuceneTestCase.AwaitsFix(bugUrl = "problem with search searching on 1 shard (no replica), " +
+ "and between getting the cluster state to do the search, and executing it, " +
+ "the shard has fully relocated (moved from started on one node, to fully started on another node")
+ @Test
+ public void testSearchAndRelocateConcurrently0Replicas() throws Exception {
+ testSearchAndRelocateConcurrently(0);
+ }
+
+ @TestLogging("org.elasticsearch.action.search.type:TRACE")
+ @Test
+ public void testSearchAndRelocateConcurrently1Replicas() throws Exception {
+ testSearchAndRelocateConcurrently(1);
+ }
+
+ private void testSearchAndRelocateConcurrently(int numberOfReplicas) throws Exception {
+ final int numShards = between(10, 20);
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", numShards).put("index.number_of_replicas", numberOfReplicas))
+ .addMapping("type1", "loc", "type=geo_point", "test", "type=string").execute().actionGet();
+ ensureGreen();
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+ final int numDocs = between(10, 20);
+ for (int i = 0; i < numDocs; i++) {
+ indexBuilders.add(client().prepareIndex("test", "type", Integer.toString(i))
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11).field("lon", 21)
+ .endObject().endObject()));
+ }
+ indexRandom(true, indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]));
+ assertHitCount(client().prepareSearch().get(), (long) (numDocs));
+ final int numIters = atLeast(10);
+ for (int i = 0; i < numIters; i++) {
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ final List<Throwable> thrownExceptions = new CopyOnWriteArrayList<Throwable>();
+ Thread[] threads = new Thread[atLeast(1)];
+ for (int j = 0; j < threads.length; j++) {
+ threads[j] = new Thread() {
+ public void run() {
+ try {
+ while (!stop.get()) {
+ SearchResponse sr = client().prepareSearch().setSize(numDocs).get();
+ assertHitCount(sr, (long) (numDocs));
+ final SearchHits sh = sr.getHits();
+ assertThat("Expected hits to be the same size the actual hits array", sh.getTotalHits(),
+ equalTo((long) (sh.getHits().length)));
+ }
+ } catch (Throwable t) {
+ thrownExceptions.add(t);
+ }
+ }
+ };
+ }
+ for (int j = 0; j < threads.length; j++) {
+ threads[j].start();
+ }
+ allowNodes("test", between(1, 3));
+ client().admin().cluster().prepareReroute().get();
+ ClusterHealthResponse resp = client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).execute().actionGet();
+ stop.set(true);
+ for (int j = 0; j < threads.length; j++) {
+ threads[j].join();
+ }
+ assertThat(resp.isTimedOut(), equalTo(false));
+
+ if (!thrownExceptions.isEmpty()) {
+ Client client = client();
+ String verified = "POST SEARCH OK";
+ for (int j = 0; j < 10; j++) {
+ if (client.prepareSearch().get().getHits().getTotalHits() != numDocs) {
+ verified = "POST SEARCH FAIL";
+ break;
+ }
+ }
+
+ assertThat("failed in iteration " + i + ", verification: " + verified, thrownExceptions, Matchers.emptyIterable());
+ }
+ }
+ }
+}
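The concurrency scaffolding above follows a common stop-flag pattern: worker threads search in a loop until a shared AtomicBoolean flips, and any failure is recorded in a thread-safe list that the main thread inspects after join(). A stripped-down sketch of just that pattern (the real test performs searches where the comment sits):

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class StopFlagWorkers {
        public static void main(String[] args) throws InterruptedException {
            final AtomicBoolean stop = new AtomicBoolean(false);
            final List<Throwable> thrown = new CopyOnWriteArrayList<Throwable>();
            Thread worker = new Thread() {
                @Override
                public void run() {
                    try {
                        while (!stop.get()) {
                            // one unit of work per iteration (a search, in the test above)
                        }
                    } catch (Throwable t) {
                        thrown.add(t); // surface failures to the main thread instead of dying silently
                    }
                }
            };
            worker.start();
            // ... trigger relocations and wait for the cluster to settle here ...
            stop.set(true);
            worker.join();
            if (!thrown.isEmpty()) {
                throw new AssertionError("worker failed", thrown.get(0));
            }
        }
    }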
diff --git a/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java b/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java
new file mode 100644
index 0000000..b2a6bb4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java
@@ -0,0 +1,311 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.util.English;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.TestCluster;
+import org.elasticsearch.test.engine.MockInternalEngine;
+import org.elasticsearch.test.engine.ThrowingAtomicReaderWrapper;
+import org.elasticsearch.test.store.MockDirectoryHelper;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+public class SearchWithRandomExceptionsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException {
+ final int numShards = between(1, 5);
+ String mapping = XContentFactory.jsonBuilder().
+ startObject().
+ startObject("type").
+ startObject("properties").
+ startObject("test")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject().
+ endObject().
+ endObject()
+ .endObject().string();
+ final double exceptionRate;
+ final double exceptionOnOpenRate;
+ if (frequently()) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ exceptionOnOpenRate = 1.0/between(5, 100);
+ exceptionRate = 0.0d;
+ } else {
+ exceptionRate = 1.0/between(5, 100);
+ exceptionOnOpenRate = 0.0d;
+ }
+ } else {
+ exceptionOnOpenRate = 1.0/between(5, 100);
+ exceptionRate = 1.0/between(5, 100);
+ }
+ } else {
+ // rarely no exception
+ exceptionRate = 0d;
+ exceptionOnOpenRate = 0d;
+ }
+
+ Builder settings = settingsBuilder()
+ .put("index.number_of_shards", numShards)
+ .put("index.number_of_replicas", randomIntBetween(0, 1))
+ .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE, exceptionRate)
+ .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate)
+ .put(MockDirectoryHelper.CHECK_INDEX_ON_CLOSE, true);
+ logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type", mapping).execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster()
+ .health(Requests.clusterHealthRequest().waitForYellowStatus().timeout(TimeValue.timeValueSeconds(5))).get(); // it's OK to timeout here
+ final int numDocs;
+ final boolean expectAllShardsFailed;
+ if (clusterHealthResponse.isTimedOut()) {
+            /* Some seeds just won't let you create the index at all and we enter a ping-pong mode,
+             * trying one node after another. That is OK, but we need to make sure we don't wait
+             * forever when indexing documents, so we set numDocs = 1 and expect all shards to fail
+             * when we search below. */
+ logger.info("ClusterHealth timed out - only index one doc and expect searches to fail");
+ numDocs = 1;
+ expectAllShardsFailed = true;
+ } else {
+ numDocs = between(10, 100);
+ expectAllShardsFailed = false;
+ }
+ long numCreated = 0;
+ boolean[] added = new boolean[numDocs];
+ for (int i = 0; i < numDocs ; i++) {
+ try {
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "" + i).setTimeout(TimeValue.timeValueSeconds(1)).setSource("test", English.intToEnglish(i)).get();
+ if (indexResponse.isCreated()) {
+ numCreated++;
+ added[i] = true;
+ }
+            } catch (ElasticsearchException ex) {
+                // failures are expected here given the injected exception rates; the doc simply isn't marked as added
+            }
+ }
+ logger.info("Start Refresh");
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get(); // don't assert on failures here
+ final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
+ logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length, refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
+ final int numSearches = atLeast(10);
+        // we don't really check anything here; we're just making sure we don't leave any open files or a broken index behind.
+ for (int i = 0; i < numSearches; i++) {
+ try {
+ int docToQuery = between(0, numDocs-1);
+ long expectedResults = added[docToQuery] ? 1 : 0;
+ logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))).get();
+ logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards);
+ // check match all
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get();
+ } catch (SearchPhaseExecutionException ex) {
+ if (!expectAllShardsFailed) {
+ throw ex;
+ } else {
+ logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
+ }
+ }
+
+ }
+ }
+
+ @Test
+ public void testRandomExceptions() throws IOException, InterruptedException, ExecutionException {
+ final int numShards = between(1, 5);
+ String mapping = XContentFactory.jsonBuilder().
+ startObject().
+ startObject("type").
+ startObject("properties").
+ startObject("test")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject().
+ endObject().
+ endObject()
+ .endObject().string();
+ final double lowLevelRate;
+ final double topLevelRate;
+ if (frequently()) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ lowLevelRate = 1.0/between(2, 10);
+ topLevelRate = 0.0d;
+ } else {
+ topLevelRate = 1.0/between(2, 10);
+ lowLevelRate = 0.0d;
+ }
+ } else {
+ lowLevelRate = 1.0/between(2, 10);
+ topLevelRate = 1.0/between(2, 10);
+ }
+ } else {
+ // rarely no exception
+ topLevelRate = 0d;
+ lowLevelRate = 0d;
+ }
+
+ Builder settings = settingsBuilder()
+ .put("index.number_of_shards", numShards)
+ .put("index.number_of_replicas", randomIntBetween(0, 1))
+ .put(MockInternalEngine.READER_WRAPPER_TYPE, RandomExceptionDirectoryReaderWrapper.class.getName())
+ .put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate)
+ .put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate)
+ .put(MockInternalEngine.WRAP_READER_RATIO, 1.0d);
+ logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type", mapping).execute().actionGet();
+ ensureSearchable();
+ final int numDocs = between(10, 100);
+ long numCreated = 0;
+ boolean[] added = new boolean[numDocs];
+ for (int i = 0; i < numDocs ; i++) {
+ try {
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "" + i).setTimeout(TimeValue.timeValueSeconds(1)).setSource("test", English.intToEnglish(i)).get();
+ if (indexResponse.isCreated()) {
+ numCreated++;
+ added[i] = true;
+ }
+            } catch (ElasticsearchException ex) {
+                // failures are expected here given the injected exception rates; the doc simply isn't marked as added
+            }
+ }
+ logger.info("Start Refresh");
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get(); // don't assert on failures here
+ final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
+ logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length, refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
+
+ final int numSearches = atLeast(100);
+        // we don't really check anything here; we're just making sure we don't leave any open files or a broken index behind.
+ for (int i = 0; i < numSearches; i++) {
+ try {
+ int docToQuery = between(0, numDocs-1);
+ long expectedResults = added[docToQuery] ? 1 : 0;
+ logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))).get();
+ logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards);
+ if (searchResponse.getSuccessfulShards() == numShards && !refreshFailed) {
+ assertThat(searchResponse.getHits().getTotalHits(), Matchers.equalTo(expectedResults));
+ }
+ // check match all
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get();
+ logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards);
+ if (searchResponse.getSuccessfulShards() == numShards && !refreshFailed) {
+ assertThat(searchResponse.getHits().getTotalHits(), Matchers.equalTo(numCreated));
+ }
+
+ } catch (SearchPhaseExecutionException ex) {
+ logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
+ }
+ }
+ }
+
+
+ public static final String EXCEPTION_TOP_LEVEL_RATIO_KEY = "index.engine.exception.ratio.top";
+ public static final String EXCEPTION_LOW_LEVEL_RATIO_KEY = "index.engine.exception.ratio.low";
+
+
+ public static class RandomExceptionDirectoryReaderWrapper extends MockInternalEngine.DirectoryReaderWrapper {
+ private final Settings settings;
+ static class ThrowingSubReaderWrapper extends SubReaderWrapper implements ThrowingAtomicReaderWrapper.Thrower {
+ private final Random random;
+ private final double topLevelRatio;
+ private final double lowLevelRatio;
+
+ ThrowingSubReaderWrapper(Settings settings) {
+ final long seed = settings.getAsLong(TestCluster.SETTING_INDEX_SEED, 0l);
+ this.topLevelRatio = settings.getAsDouble(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d);
+ this.lowLevelRatio = settings.getAsDouble(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d);
+ this.random = new Random(seed);
+ }
+
+ @Override
+ public AtomicReader wrap(AtomicReader reader) {
+ return new ThrowingAtomicReaderWrapper(reader, this);
+ }
+
+ @Override
+ public void maybeThrow(ThrowingAtomicReaderWrapper.Flags flag) throws IOException {
+ switch (flag) {
+ case Fields:
+ case TermVectors:
+ case Terms:
+ case TermsEnum:
+ case Intersect:
+ case Norms:
+ case NumericDocValues:
+ case BinaryDocValues:
+ case SortedDocValues:
+ case SortedSetDocValues:
+ if (random.nextDouble() < topLevelRatio) {
+ throw new IOException("Forced top level Exception on [" + flag.name() + "]");
+ }
+ break;
+ case DocsEnum:
+ case DocsAndPositionsEnum:
+ if (random.nextDouble() < lowLevelRatio) {
+ throw new IOException("Forced low level Exception on [" + flag.name() + "]");
+ }
+ break;
+ }
+ }
+
+ public boolean wrapTerms(String field) {
+ return true;
+ }
+ }
+
+ public RandomExceptionDirectoryReaderWrapper(DirectoryReader in, Settings settings) {
+ super(in, new ThrowingSubReaderWrapper(settings));
+ this.settings = settings;
+ }
+
+ @Override
+ protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) {
+ return new RandomExceptionDirectoryReaderWrapper(in, settings);
+ }
+ }
+
+
+}
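Both tests above drive failures through seeded randomness so that a failing run can be replayed from its seed. The essence of the ThrowingSubReaderWrapper at the bottom of the file is a deterministic fault injector: a Random built from the index seed plus a per-category ratio decides whether a given call throws. A reduced sketch of that idea (illustrative; the real class also distinguishes top-level from low-level reader operations):

    import java.io.IOException;
    import java.util.Random;

    public class FaultInjector {
        private final Random random;
        private final double ratio;

        FaultInjector(long seed, double ratio) {
            this.random = new Random(seed); // same seed => same failure sequence => reproducible test runs
            this.ratio = ratio;
        }

        void maybeThrow(String operation) throws IOException {
            if (random.nextDouble() < ratio) {
                throw new IOException("Forced Exception on [" + operation + "]");
            }
        }
    }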
diff --git a/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresTests.java b/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresTests.java
new file mode 100644
index 0000000..84803d3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresTests.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+/**
+ *
+ */
+public class TransportSearchFailuresTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testFailedSearchWithWrongQuery() throws Exception {
+ logger.info("Start Testing failed search with wrong query");
+ prepareCreate("test", 1, settingsBuilder().put("index.number_of_shards", 3)
+ .put("index.number_of_replicas", 2)
+ .put("routing.hash.type", "simple")).execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ index(client(), Integer.toString(i), "test", i);
+ }
+ RefreshResponse refreshResponse = client().admin().indices().refresh(refreshRequest("test")).actionGet();
+ assertThat(refreshResponse.getTotalShards(), equalTo(9));
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(3));
+ assertThat(refreshResponse.getFailedShards(), equalTo(0));
+ for (int i = 0; i < 5; i++) {
+ try {
+ SearchResponse searchResponse = client().search(searchRequest("test").source("{ xxx }".getBytes(Charsets.UTF_8))).actionGet();
+ assertThat(searchResponse.getTotalShards(), equalTo(3));
+ assertThat(searchResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(searchResponse.getFailedShards(), equalTo(3));
+ fail("search should fail");
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
+ // all is well
+ }
+ }
+
+ allowNodes("test", 2);
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes(">=2").execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest("test")
+ .waitForYellowStatus().waitForRelocatingShards(0).waitForActiveShards(6)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+ assertThat(clusterHealth.getActiveShards(), equalTo(6));
+
+ refreshResponse = client().admin().indices().refresh(refreshRequest("test")).actionGet();
+ assertThat(refreshResponse.getTotalShards(), equalTo(9));
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(6));
+ assertThat(refreshResponse.getFailedShards(), equalTo(0));
+
+ for (int i = 0; i < 5; i++) {
+ try {
+ SearchResponse searchResponse = client().search(searchRequest("test").source("{ xxx }".getBytes(Charsets.UTF_8))).actionGet();
+ assertThat(searchResponse.getTotalShards(), equalTo(3));
+ assertThat(searchResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(searchResponse.getFailedShards(), equalTo(3));
+ fail("search should fail");
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
+ // all is well
+ }
+ }
+
+ logger.info("Done Testing failed search");
+ }
+
+ private void index(Client client, String id, String nameValue, int age) throws IOException {
+ client.index(Requests.indexRequest("test").type("type1").id(id).source(source(id, nameValue, age)).consistencyLevel(WriteConsistencyLevel.ONE)).actionGet();
+ }
+
+ private XContentBuilder source(String id, String nameValue, int age) throws IOException {
+ StringBuilder multi = new StringBuilder().append(nameValue);
+ for (int i = 0; i < age; i++) {
+ multi.append(" ").append(nameValue);
+ }
+ return jsonBuilder().startObject()
+ .field("id", id)
+ .field("name", nameValue + id)
+ .field("age", age)
+ .field("multi", multi.toString())
+ .field("_boost", age * 10)
+ .endObject();
+ }
+}
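One detail of the source() helper above matters for the next file as well: the "multi" field contains nameValue repeated age + 1 times, and the document also carries a "_boost" of age * 10, so for a term query on "multi" a higher age presumably means both a higher term frequency and a higher boost. That is why TransportTwoNodesSearchTests below can assert descending ids for an unsorted termQuery("multi", "test"). A standalone restatement of the field construction (sketch):

    public class MultiFieldSketch {
        // A doc with age = i holds nameValue (i + 1) times, so relevance grows with the id.
        static String multiField(String nameValue, int age) {
            StringBuilder multi = new StringBuilder(nameValue);
            for (int i = 0; i < age; i++) {
                multi.append(' ').append(nameValue);
            }
            return multi.toString();
        }

        public static void main(String[] args) {
            System.out.println(multiField("test", 2)); // "test test test"
        }
    }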
diff --git a/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchTests.java b/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchTests.java
new file mode 100644
index 0000000..16b8b2e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchTests.java
@@ -0,0 +1,439 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.Sets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.search.MultiSearchResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionBuilder;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.facet.FacetBuilders;
+import org.elasticsearch.search.facet.query.QueryFacet;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.elasticsearch.action.search.SearchType.*;
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class TransportTwoNodesSearchTests extends ElasticsearchIntegrationTest {
+
+ private Set<String> prepareData() throws Exception {
+ Set<String> fullExpectedIds = Sets.newHashSet();
+ client().admin().indices().create(createIndexRequest("test")
+ .settings(settingsBuilder().put("index.number_of_shards", 3)
+ .put("index.number_of_replicas", 0)
+ .put("routing.hash.type", "simple")))
+ .actionGet();
+
+ ensureGreen();
+ for (int i = 0; i < 100; i++) {
+ index(client(), Integer.toString(i), "test", i);
+ fullExpectedIds.add(Integer.toString(i));
+ }
+ client().admin().indices().refresh(refreshRequest("test")).actionGet();
+ return fullExpectedIds;
+ }
+
+ private void index(Client client, String id, String nameValue, int age) throws IOException {
+ client().index(Requests.indexRequest("test").type("type1").id(id).source(source(id, nameValue, age))).actionGet();
+ }
+
+ private XContentBuilder source(String id, String nameValue, int age) throws IOException {
+ StringBuilder multi = new StringBuilder().append(nameValue);
+ for (int i = 0; i < age; i++) {
+ multi.append(" ").append(nameValue);
+ }
+ return jsonBuilder().startObject()
+ .field("id", id)
+ .field("nid", Integer.parseInt(id))
+ .field("name", nameValue + id)
+ .field("age", age)
+ .field("multi", multi.toString())
+ .field("_boost", age * 10)
+ .endObject();
+ }
+
+ @Test
+ public void testDfsQueryThenFetch() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(60).explain(true);
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(DFS_QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60));
+// System.out.println("max_score: " + searchResponse.hits().maxScore());
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+// System.out.println(hit.shard() + ": " + hit.score() + ":" + hit.explanation());
+ assertThat(hit.explanation(), notNullValue());
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1)));
+ }
+
+ searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - 60 - 1 - i)));
+ }
+ }
+
+ @Test
+ public void testDfsQueryThenFetchWithSort() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(60).explain(true).sort("age", SortOrder.ASC);
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(DFS_QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60));
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+// System.out.println(hit.shard() + ": " + hit.explanation());
+ assertThat(hit.explanation(), notNullValue());
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i)));
+ }
+
+ searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i + 60)));
+ }
+ }
+
+ @Test
+ public void testQueryThenFetch() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .sort("nid", SortOrder.DESC) // we have to sort here to have some ordering with dist scoring
+ .from(0).size(60).explain(true);
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60));
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+// System.out.println(hit.shard() + ": " + hit.explanation());
+ assertThat(hit.explanation(), notNullValue());
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1)));
+ }
+
+ searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - 60 - 1 - i)));
+ }
+ }
+
+ @Test
+ public void testQueryThenFetchWithFrom() throws Exception {
+ Set<String> fullExpectedIds = prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(matchAllQuery())
+ .explain(true);
+
+ Set<String> collectedIds = Sets.newHashSet();
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source.from(0).size(60)).searchType(QUERY_THEN_FETCH)).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60));
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ collectedIds.add(hit.id());
+ }
+ searchResponse = client().search(searchRequest("test").source(source.from(60).size(60)).searchType(QUERY_THEN_FETCH)).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ collectedIds.add(hit.id());
+ }
+ assertThat(collectedIds, equalTo(fullExpectedIds));
+ }
+
+ @Test
+ public void testQueryThenFetchWithSort() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(60).explain(true).sort("age", SortOrder.ASC);
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60));
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+// System.out.println(hit.shard() + ": " + hit.explanation());
+ assertThat(hit.explanation(), notNullValue());
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i)));
+ }
+
+ searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i + 60)));
+ }
+ }
+
+ @Test
+ public void testQueryAndFetch() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(20).explain(true);
+
+ Set<String> expectedIds = Sets.newHashSet();
+ for (int i = 0; i < 100; i++) {
+ expectedIds.add(Integer.toString(i));
+ }
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_AND_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60)); // 20 per shard
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+// System.out.println(hit.shard() + ": " + hit.explanation());
+ assertThat(hit.explanation(), notNullValue());
+            // we can't really check ids here, since it's query and fetch and we aren't controlling the distribution
+// assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1)));
+ assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), notNullValue());
+ }
+
+ searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+// assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - 60 - 1 - i)));
+ // we don't do perfect sorting when it comes to scroll with Query+Fetch
+ assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), notNullValue());
+ }
+ assertThat("make sure we got all [" + expectedIds + "]", expectedIds.size(), equalTo(0));
+ }
+
+ @Test
+ public void testDfsQueryAndFetch() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(20).explain(true);
+
+ Set<String> expectedIds = Sets.newHashSet();
+ for (int i = 0; i < 100; i++) {
+ expectedIds.add(Integer.toString(i));
+ }
+
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(DFS_QUERY_AND_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60)); // 20 per shard
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+// System.out.println(hit.shard() + ": " + hit.explanation());
+ assertThat(hit.explanation(), notNullValue());
+// assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - i - 1)));
+ assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), notNullValue());
+ }
+
+ searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+// System.out.println(hit.shard() + ": " + hit.explanation());
+// assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - 60 - 1 - i)));
+ // we don't do perfect sorting when it comes to scroll with Query+Fetch
+ assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), notNullValue());
+ }
+ assertThat("make sure we got all [" + expectedIds + "]", expectedIds.size(), equalTo(0));
+ }
+
+ @Test
+ public void testSimpleFacets() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder sourceBuilder = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(20).explain(true)
+ .facet(FacetBuilders.queryFacet("all", termQuery("multi", "test")).global(true))
+ .facet(FacetBuilders.queryFacet("test1", termQuery("name", "test1")));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(sourceBuilder)).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+
+ assertThat(searchResponse.getFacets().facet(QueryFacet.class, "test1").getCount(), equalTo(1l));
+ assertThat(searchResponse.getFacets().facet(QueryFacet.class, "all").getCount(), equalTo(100l));
+ }
+
+ @Test
+ public void testFailedSearchWithWrongQuery() throws Exception {
+ prepareData();
+
+ logger.info("Start Testing failed search with wrong query");
+ try {
+ SearchResponse searchResponse = client().search(searchRequest("test").source("{ xxx }".getBytes(Charsets.UTF_8))).actionGet();
+ assertThat(searchResponse.getTotalShards(), equalTo(3));
+ assertThat(searchResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(searchResponse.getFailedShards(), equalTo(3));
+ fail("search should fail");
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
+ // all is well
+ }
+ logger.info("Done Testing failed search");
+ }
+
+ @Test
+ public void testFailedSearchWithWrongFrom() throws Exception {
+ prepareData();
+
+ logger.info("Start Testing failed search with wrong from");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(1000).size(20).explain(true);
+ SearchResponse response = client().search(searchRequest("test").searchType(DFS_QUERY_AND_FETCH).source(source)).actionGet();
+ assertThat(response.getHits().hits().length, equalTo(0));
+ assertThat(response.getTotalShards(), equalTo(3));
+ assertThat(response.getSuccessfulShards(), equalTo(3));
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ response = client().search(searchRequest("test").searchType(QUERY_THEN_FETCH).source(source)).actionGet();
+ assertNoFailures(response);
+ assertThat(response.getHits().hits().length, equalTo(0));
+
+ response = client().search(searchRequest("test").searchType(DFS_QUERY_AND_FETCH).source(source)).actionGet();
+ assertNoFailures(response);
+ assertThat(response.getHits().hits().length, equalTo(0));
+
+ response = client().search(searchRequest("test").searchType(DFS_QUERY_THEN_FETCH).source(source)).actionGet();
+ assertNoFailures(response);
+ assertThat(response.getHits().hits().length, equalTo(0));
+
+ logger.info("Done Testing failed search");
+ }
+
+ @Test
+ public void testFailedMultiSearchWithWrongQuery() throws Exception {
+ prepareData();
+
+ logger.info("Start Testing failed multi search with a wrong query");
+
+ MultiSearchResponse response = client().prepareMultiSearch()
+ // Add custom score query with missing script
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.customScoreQuery(QueryBuilders.termQuery("nid", 1))))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2)))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ assertThat(response.getResponses()[0].getFailureMessage(), notNullValue());
+
+ assertThat(response.getResponses()[1].getFailureMessage(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().getHits().hits().length, equalTo(1));
+
+ assertThat(response.getResponses()[2].getFailureMessage(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().getHits().hits().length, equalTo(10));
+
+ logger.info("Done Testing failed search");
+ }
+
+
+ @Test
+ public void testFailedMultiSearchWithWrongQuery_withFunctionScore() throws Exception {
+ prepareData();
+
+ logger.info("Start Testing failed multi search with a wrong query");
+
+ MultiSearchResponse response = client().prepareMultiSearch()
+ // Add custom score query with missing script
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("nid", 1)).add(new ScriptScoreFunctionBuilder())))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2)))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ assertThat(response.getResponses()[0].getFailureMessage(), notNullValue());
+
+ assertThat(response.getResponses()[1].getFailureMessage(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().getHits().hits().length, equalTo(1));
+
+ assertThat(response.getResponses()[2].getFailureMessage(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().getHits().hits().length, equalTo(10));
+
+ logger.info("Done Testing failed search");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java
new file mode 100644
index 0000000..a96dba1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java
@@ -0,0 +1,2246 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.child;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.mapper.MergeMappingException;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.index.search.child.ScoreType;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.search.facet.FacetBuilders.termsFacet;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for parent/child queries and filters: has_child, has_parent and top_children.
+ */
+public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void multiLevelChild() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .addMapping("grandchild", "_parent", "type=child")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .get();
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "c_value1").setParent("p1").get();
+ client().prepareIndex("test", "grandchild", "gc1").setSource("gc_field", "gc_value1")
+ .setParent("c1").setRouting("gc1").get();
+ refresh();
+
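+ // match parents whose child matches and where that child itself has a matching grandchild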
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildFilter(
+ "child",
+ filteredQuery(termQuery("c_field", "c_value1"),
+ hasChildFilter("grandchild", termQuery("gc_field", "gc_value1")))))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasParentFilter("parent", termFilter("p_field", "p_value1")))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c1"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasParentFilter("child", termFilter("c_field", "c_value1")))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("gc1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasParentQuery("parent", termQuery("p_field", "p_value1"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasParentQuery("child", termQuery("c_field", "c_value1"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("gc1"));
+ }
+
+ @Test
+ // see #2744
+ public void test2744() throws ElasticsearchException, IOException {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("foo")
+ .addMapping("test", "_parent", "type=foo")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "foo", "1").setSource("foo", 1).get();
+ client().prepareIndex("test", "test").setSource("foo", 1).setParent("1").get();
+ client().admin().indices().prepareRefresh().get();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("test", matchQuery("foo", 1))).execute()
+ .actionGet();
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+
+ }
+
+ @Test
+ public void simpleChildQuery() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ refresh();
+
+ // TEST FETCHING _parent from child
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(idsQuery("child").ids("c1")).addFields("_parent").execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c1"));
+ assertThat(searchResponse.getHits().getAt(0).field("_parent").value().toString(), equalTo("p1"));
+
+ // TEST matching on parent
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("child._parent", "p1")).addFields("_parent").execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(0).field("_parent").value().toString(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(1).field("_parent").value().toString(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("_parent", "p1")).addFields("_parent").get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(0).field("_parent").value().toString(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(1).field("_parent").value().toString(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(queryString("_parent:p1")).addFields("_parent").get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(0).field("_parent").value().toString(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(1).field("_parent").value().toString(), equalTo("p1"));
+
+ // TOP CHILDREN QUERY
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow"))).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "blue")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "red"))).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS CHILD
+ searchResponse = client().prepareSearch("test").setQuery(randomHasChild("child", "c_field", "yellow"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(randomHasChild("child", "c_field", "blue")).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(randomHasChild("child", "c_field", "red")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS PARENT
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasParent("parent", "p_field", "p_value2")).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c3"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("c4"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasParent("parent", "p_field", "p_value1")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("c2"));
+ }
+
+ @Test
+ public void testClearIdCacheBug() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("parent")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).get();
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "p0").setSource("p_field", "p_value0").get();
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+
+ refresh();
+ // No _parent field yet, there shouldn't be anything in the parent id cache
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setIdCache(true).get();
+ assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), equalTo(0l));
+
+ // Now add mapping + children
+ client().admin().indices().preparePutMapping("test").setType("child")
+ .setSource("_parent", "type=parent")
+ .get();
+
+ // index simple data
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ client().admin().indices().prepareRefresh().get();
+
+ indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setIdCache(true).get();
+ // automatic warm-up has populated the cache since it found a parent field mapper
+ assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), greaterThan(0l));
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setIdCache(true).get();
+ assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), greaterThan(0l));
+
+ client().admin().indices().prepareClearCache("test").setIdCache(true).get();
+ indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setIdCache(true).get();
+ assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), equalTo(0l));
+ }
+
+ @Test
+ // See: https://github.com/elasticsearch/elasticsearch/issues/3290
+ public void testCachingBug_withFqueryFilter() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ // index simple data
+ for (int i = 0; i < 10; i++) {
+ builders.add(client().prepareIndex("test", "parent", Integer.toString(i)).setSource("p_field", i));
+ }
+ indexRandom(randomBoolean(), builders);
+ builders.clear();
+ for (int j = 0; j < 2; j++) {
+ for (int i = 0; i < 10; i++) {
+ builders.add(client().prepareIndex("test", "child", Integer.toString(i)).setSource("c_field", i).setParent("0"));
+ }
+ for (int i = 0; i < 10; i++) {
+ builders.add(client().prepareIndex("test", "child", Integer.toString(i + 10)).setSource("c_field", i + 10).setParent(Integer.toString(i)));
+ }
+
+ if (randomBoolean()) {
+ break; // randomly break out so we don't have deletes / updates
+ }
+ }
+ indexRandom(true, builders);
+
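+ // run the cached wrapped queries for several rounds; every round must keep returning failure-free results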
+ for (int i = 1; i <= 10; i++) {
+ logger.info("Round {}", i);
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(queryFilter(topChildrenQuery("child", matchAllQuery())).cache(true))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(queryFilter(hasChildQuery("child", matchAllQuery()).scoreType("max")).cache(true)))
+ .get();
+ assertNoFailures(searchResponse);
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(queryFilter(hasParentQuery("parent", matchAllQuery()).scoreType("score")).cache(true)))
+ .get();
+ assertNoFailures(searchResponse);
+ }
+ }
+
+ @Test
+ public void testHasParentFilter() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+ Map<String, Set<String>> parentToChildren = newHashMap();
+ // Childless parent
+ client().prepareIndex("test", "parent", "p0").setSource("p_field", "p0").get();
+ parentToChildren.put("p0", new HashSet<String>());
+
+ String previousParentId = null;
+ int numChildDocs = 32;
+ int numChildDocsPerParent = 0;
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
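+ // a new parent is created whenever i is a multiple of the current children-per-parent count,
+ // which grows by one each time, so later parents own progressively more children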
+ for (int i = 1; i <= numChildDocs; i++) {
+
+ if (previousParentId == null || i % numChildDocsPerParent == 0) {
+ previousParentId = "p" + i;
+ builders.add(client().prepareIndex("test", "parent", previousParentId).setSource("p_field", previousParentId));
+ numChildDocsPerParent++;
+ }
+
+ String childId = "c" + i;
+ builders.add(client().prepareIndex("test", "child", childId).setSource("c_field", childId).setParent(previousParentId));
+
+ if (!parentToChildren.containsKey(previousParentId)) {
+ parentToChildren.put(previousParentId, new HashSet<String>());
+ }
+ assertThat(parentToChildren.get(previousParentId).add(childId), is(true));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[0]));
+
+ assertThat(parentToChildren.isEmpty(), equalTo(false));
+ for (Map.Entry<String, Set<String>> parentToChildrenEntry : parentToChildren.entrySet()) {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasParentFilter("parent", termQuery("p_field", parentToChildrenEntry.getKey()))))
+ .setSize(numChildDocsPerParent).get();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ Set<String> childIds = parentToChildrenEntry.getValue();
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long) childIds.size()));
+ for (int i = 0; i < searchResponse.getHits().totalHits(); i++) {
+ assertThat(childIds.remove(searchResponse.getHits().getAt(i).id()), is(true));
+ assertThat(searchResponse.getHits().getAt(i).score(), is(1.0f));
+ }
+ assertThat(childIds.size(), is(0));
+ }
+ }
+
+ @Test
+ public void simpleChildQueryWithFlush() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data with flushes, so we have many segments
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ client().admin().indices().prepareFlush().get();
+ refresh();
+
+ // TOP CHILDREN QUERY
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow")))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "blue"))).execute()
+ .actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "red"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS CHILD QUERY
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "yellow"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "blue"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "red"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS CHILD FILTER
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "yellow")))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "red"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ }
+
+ @Test
+ public void simpleChildQueryWithFlushAnd3Shards() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 3).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data with flushes, so we have many segments
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ client().admin().indices().prepareFlush().get();
+
+ client().admin().indices().prepareRefresh().get();
+
+ // TOP CHILDREN QUERY
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow")))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "blue"))).execute()
+ .actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "red"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS CHILD QUERY
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "yellow"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "blue"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "red"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS CHILD FILTER
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "yellow")))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "red"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ }
+
+ @Test
+ public void testScopedFacet() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ refresh();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(topChildrenQuery("child", boolQuery().should(termQuery("c_field", "red")).should(termQuery("c_field", "yellow"))))
+ .addFacet(
+ termsFacet("facet1")
+ .facetFilter(boolFilter().should(termFilter("c_field", "red")).should(termFilter("c_field", "yellow")))
+ .field("c_field").global(true)).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ assertThat(searchResponse.getFacets().facets().size(), equalTo(1));
+ TermsFacet termsFacet = searchResponse.getFacets().facet("facet1");
+ assertThat(termsFacet.getEntries().size(), equalTo(2));
+ assertThat(termsFacet.getEntries().get(0).getTerm().string(), equalTo("red"));
+ assertThat(termsFacet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(termsFacet.getEntries().get(1).getTerm().string(), equalTo("yellow"));
+ assertThat(termsFacet.getEntries().get(1).getCount(), equalTo(1));
+ }
+
+ @Test
+ public void testDeletedParent() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ refresh();
+
+ // TOP CHILDREN QUERY
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow")))
+ .get();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1\""));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "yellow")))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1\""));
+
+ // update p1 and check that we get the updated values...
+
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1_updated").get();
+ client().admin().indices().prepareRefresh().get();
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1_updated\""));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "yellow")))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1_updated\""));
+ }
+
+ @Test
+ public void testDfsSearchType() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ refresh();
+
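+ // wrapping parent/child queries in mustNot must not cause failures during the DFS phase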
+ SearchResponse searchResponse = client().prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(boolQuery().mustNot(hasChildQuery("child", boolQuery().should(queryString("c_field:*"))))).get();
+ assertNoFailures(searchResponse);
+
+ searchResponse = client().prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(boolQuery().mustNot(hasParentQuery("parent", boolQuery().should(queryString("p_field:*"))))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+
+ searchResponse = client().prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(boolQuery().mustNot(topChildrenQuery("child", boolQuery().should(queryString("c_field:*"))))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ }
+
+ @Test
+ public void testFixAOBEIfTopChildrenIsWrappedInMustNotClause() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setQuery(boolQuery().mustNot(topChildrenQuery("child", boolQuery().should(queryString("c_field:*"))))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ }
+
+ @Test
+ public void testTopChildrenReSearchBug() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+ int numberOfParents = 4;
+ int numberOfChildrenPerParent = 123;
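+ // index enough children per parent to force top_children through its re-search path (see test name)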
+ for (int i = 1; i <= numberOfParents; i++) {
+ String parentId = String.format(Locale.ROOT, "p%d", i);
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", String.format(Locale.ROOT, "p_value%d", i)).execute()
+ .actionGet();
+ for (int j = 1; j <= numberOfChildrenPerParent; j++) {
+ client().prepareIndex("test", "child", String.format(Locale.ROOT, "%s_c%d", parentId, j))
+ .setSource("c_field1", parentId, "c_field2", i % 2 == 0 ? "even" : "not_even").setParent(parentId).execute()
+ .actionGet();
+ }
+ }
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field1", "p3")))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p3"));
+
+ searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field2", "even"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+ }
+
+ @Test
+ public void testHasChildAndHasParentFailWhenSomeSegmentsDontContainAnyParentOrChildDocs() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
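+ // flush after each step so at least one segment contains only the unrelated "type1" doc,
+ // i.e. a segment without any parent or child docs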
+ client().prepareIndex("test", "parent", "1").setSource("p_field", 1).get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("c_field", 1).get();
+ client().admin().indices().prepareFlush("test").get();
+
+ client().prepareIndex("test", "type1", "1").setSource("p_field", "p_value1").get();
+ client().admin().indices().prepareFlush("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasChildFilter("child", matchAllQuery()))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasParentFilter("parent", matchAllQuery()))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ @Test
+ public void testCountApiUsage() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent(parentId).get();
+ refresh();
+
+ CountResponse countResponse = client().prepareCount("test").setQuery(topChildrenQuery("child", termQuery("c_field", "1")))
+ .get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
+ .get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "1"))))
+ .get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(constantScoreQuery(hasParentFilter("parent", termQuery("p_field", "1"))))
+ .get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testExplainUsage() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent(parentId).get();
+ refresh();
+
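+ // parent/child queries don't produce real explanations; they return a "not implemented yet..." placeholder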
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setExplain(true)
+ .setQuery(topChildrenQuery("child", termQuery("c_field", "1")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("not implemented yet..."));
+
+ searchResponse = client().prepareSearch("test")
+ .setExplain(true)
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("not implemented yet..."));
+
+ searchResponse = client().prepareSearch("test")
+ .setExplain(true)
+ .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("not implemented yet..."));
+
+ ExplainResponse explainResponse = client().prepareExplain("test", "parent", parentId)
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertThat(explainResponse.isExists(), equalTo(true));
+ assertThat(explainResponse.getExplanation().getDescription(), equalTo("not implemented yet..."));
+ }
+
+ @Test
+ public void testScoreForParentChildQueries() throws Exception {
+
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping(
+ "child",
+ jsonBuilder().startObject().startObject("child").startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject())
+ .addMapping(
+ "child1",
+ jsonBuilder().startObject().startObject("child1").startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0))
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ indexRandom(false, createDocBuilders().toArray(new IndexRequestBuilder[0]));
+ refresh();
+
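+ // scoreType determines how matching child scores combine into the parent score: sum, max or avg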
+ SearchResponse response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery("child",
+ QueryBuilders.customScoreQuery(matchQuery("c_field2", 0)).script("doc['c_field1'].value")).scoreType("sum"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(6f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(3f));
+
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery("child",
+ QueryBuilders.customScoreQuery(matchQuery("c_field2", 0)).script("doc['c_field1'].value")).scoreType("max"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(2f));
+
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery("child",
+ QueryBuilders.customScoreQuery(matchQuery("c_field2", 0)).script("doc['c_field1'].value")).scoreType("avg"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1.5f));
+
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasParentQuery("parent",
+ QueryBuilders.customScoreQuery(matchQuery("p_field1", "p_value3")).script("doc['p_field2'].value"))
+ .scoreType("score")).addSort(SortBuilders.fieldSort("c_field3")).addSort(SortBuilders.scoreSort())
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(7l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("13"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("14"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("15"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[3].id(), equalTo("16"));
+ assertThat(response.getHits().hits()[3].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[4].id(), equalTo("17"));
+ assertThat(response.getHits().hits()[4].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[5].id(), equalTo("18"));
+ assertThat(response.getHits().hits()[5].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[6].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[6].score(), equalTo(5f));
+ }
+
+ List<IndexRequestBuilder> createDocBuilders() {
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
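+ // c_field1 feeds the script score, c_field2 is the match criterion, c_field3 is used for sorting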
+ // Parent 1 and its children
+ indexBuilders.add(client().prepareIndex().setType("parent").setId("1").setIndex("test").setSource("p_field", "p_value1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("1").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 0).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("2").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 0).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("3").setIndex("test")
+ .setSource("c_field1", 2, "c_field2", 0).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("4").setIndex("test")
+ .setSource("c_field1", 2, "c_field2", 0).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("5").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("6").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2).setParent("1"));
+
+ // Parent 2 and its children
+ indexBuilders.add(client().prepareIndex().setType("parent").setId("2").setIndex("test").setSource("p_field", "p_value2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("7").setIndex("test")
+ .setSource("c_field1", 3, "c_field2", 0).setParent("2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("8").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("9").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("p")); // why
+ // "p"????
+ indexBuilders.add(client().prepareIndex().setType("child").setId("10").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("11").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("12").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2).setParent("2"));
+
+ // Parent 3 and its children
+
+ indexBuilders.add(client().prepareIndex().setType("parent").setId("3").setIndex("test")
+ .setSource("p_field1", "p_value3", "p_field2", 5));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("13").setIndex("test")
+ .setSource("c_field1", 4, "c_field2", 0, "c_field3", 0).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("14").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1, "c_field3", 1).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("15").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 2).setParent("3")); // why
+ // "p"????
+ indexBuilders.add(client().prepareIndex().setType("child").setId("16").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 3).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("17").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 4).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("18").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 5).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child1").setId("1").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 6).setParent("3"));
+
+ return indexBuilders;
+ }
+
+ @Test
+ public void testScoreForParentChildQueries_withFunctionScore() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .addMapping("child1", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ indexRandom(false, createDocBuilders().toArray(new IndexRequestBuilder[0]));
+ refresh();
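+ // mirrors testScoreForParentChildQueries, replacing custom_score with function_score in REPLACE boost mode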
+ SearchResponse response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery(
+ "child",
+ QueryBuilders.functionScoreQuery(matchQuery("c_field2", 0), scriptFunction("doc['c_field1'].value"))
+ .boostMode(CombineFunction.REPLACE.getName())).scoreType("sum")).get();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(6f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(3f));
+
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery(
+ "child",
+ QueryBuilders.functionScoreQuery(matchQuery("c_field2", 0), scriptFunction("doc['c_field1'].value"))
+ .boostMode(CombineFunction.REPLACE.getName())).scoreType("max")).get();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(2f));
+
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery(
+ "child",
+ QueryBuilders.functionScoreQuery(matchQuery("c_field2", 0), scriptFunction("doc['c_field1'].value"))
+ .boostMode(CombineFunction.REPLACE.getName())).scoreType("avg")).get();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1.5f));
+
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasParentQuery(
+ "parent",
+ QueryBuilders.functionScoreQuery(matchQuery("p_field1", "p_value3"), scriptFunction("doc['p_field2'].value"))
+ .boostMode(CombineFunction.REPLACE.getName())).scoreType("score"))
+ .addSort(SortBuilders.fieldSort("c_field3")).addSort(SortBuilders.scoreSort()).get();
+
+ assertThat(response.getHits().totalHits(), equalTo(7l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("13"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("14"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("15"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[3].id(), equalTo("16"));
+ assertThat(response.getHits().hits()[3].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[4].id(), equalTo("17"));
+ assertThat(response.getHits().hits()[4].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[5].id(), equalTo("18"));
+ assertThat(response.getHits().hits()[5].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[6].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[6].score(), equalTo(5f));
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/2536
+ public void testParentChildQueriesCanHandleNoRelevantTypesInIndex() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value"))).get();
+ assertThat(response.getFailedShards(), equalTo(0));
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ client().prepareIndex("test", "child1").setSource(jsonBuilder().startObject().field("text", "value").endObject()).setRefresh(true)
+ .get();
+
+ response = client().prepareSearch("test").setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value"))).get();
+ assertThat(response.getFailedShards(), equalTo(0));
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ response = client().prepareSearch("test").setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value")).scoreType("max"))
+ .get();
+ assertThat(response.getFailedShards(), equalTo(0));
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ response = client().prepareSearch("test").setQuery(QueryBuilders.hasParentQuery("child", matchQuery("text", "value"))).get();
+ assertThat(response.getFailedShards(), equalTo(0));
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ response = client().prepareSearch("test").setQuery(QueryBuilders.hasParentQuery("child", matchQuery("text", "value")).scoreType("score"))
+ .get();
+ assertThat(response.getFailedShards(), equalTo(0));
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+ }
+
+ @Test
+ public void testHasChildAndHasParentFilter_withFilter() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("p_field", 1).get();
+ client().prepareIndex("test", "child", "2").setParent("1").setSource("c_field", 1).get();
+ client().admin().indices().prepareFlush("test").get();
+
+ client().prepareIndex("test", "type1", "3").setSource("p_field", "p_value1").get();
+ client().admin().indices().prepareFlush("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasChildFilter("child", termFilter("c_field", 1)))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasParentFilter("parent", termFilter("p_field", 1)))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ }
+
+ @Test
+ public void testSimpleQueryRewrite() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ int childId = 0;
+ for (int i = 0; i < 10; i++) {
+ String parentId = String.format(Locale.ROOT, "p%03d", i);
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", parentId).get();
+ int j = childId;
+ for (; j < childId + 50; j++) {
+ String childUid = String.format(Locale.ROOT, "c%03d", j);
+ client().prepareIndex("test", "child", childUid).setSource("c_field", childUid).setParent(parentId).get();
+ }
+ childId = j;
+ }
+ refresh();
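+        // The prefix queries below are multi-term queries that must be rewritten before execution; this
+        // verifies that has_child, has_parent and top_children rewrite correctly under both search types.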
+
+ SearchType[] searchTypes = new SearchType[]{SearchType.QUERY_THEN_FETCH, SearchType.DFS_QUERY_THEN_FETCH};
+ for (SearchType searchType : searchTypes) {
+ SearchResponse searchResponse = client().prepareSearch("test").setSearchType(searchType)
+ .setQuery(hasChildQuery("child", prefixQuery("c_field", "c")).scoreType("max")).addSort("p_field", SortOrder.ASC)
+ .setSize(5).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(10L));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("p000"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("p001"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("p002"));
+ assertThat(searchResponse.getHits().hits()[3].id(), equalTo("p003"));
+ assertThat(searchResponse.getHits().hits()[4].id(), equalTo("p004"));
+
+ searchResponse = client().prepareSearch("test").setSearchType(searchType)
+ .setQuery(hasParentQuery("parent", prefixQuery("p_field", "p")).scoreType("score")).addSort("c_field", SortOrder.ASC)
+ .setSize(5).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(500L));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("c000"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("c001"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("c002"));
+ assertThat(searchResponse.getHits().hits()[3].id(), equalTo("c003"));
+ assertThat(searchResponse.getHits().hits()[4].id(), equalTo("c004"));
+
+ searchResponse = client().prepareSearch("test").setSearchType(searchType)
+ .setQuery(topChildrenQuery("child", prefixQuery("c_field", "c"))).addSort("p_field", SortOrder.ASC).setSize(5)
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(10L));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("p000"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("p001"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("p002"));
+ assertThat(searchResponse.getHits().hits()[3].id(), equalTo("p003"));
+ assertThat(searchResponse.getHits().hits()[4].id(), equalTo("p004"));
+ }
+ }
+
+ @Test
+ // See also issue:
+ // https://github.com/elasticsearch/elasticsearch/issues/3144
+ public void testReIndexingParentAndChildDocuments() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "x").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "x").setParent("p2").get();
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "yellow")).scoreType("sum")).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1\""));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ boolQuery().must(matchQuery("c_field", "x")).must(
+ hasParentQuery("parent", termQuery("p_field", "p_value2")).scoreType("score"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c3"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("c4"));
+
+ // re-index
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "d" + i).setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "x").setParent("p2").get();
+ client().admin().indices().prepareRefresh("test").get();
+ }
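+        // After repeatedly re-indexing the same parent and child ids, the parent/child joins must still resolve.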
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "yellow")).scoreType("sum"))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1\""));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ boolQuery().must(matchQuery("c_field", "x")).must(
+ hasParentQuery("parent", termQuery("p_field", "p_value2")).scoreType("score"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), Matchers.anyOf(equalTo("c3"), equalTo("c4")));
+ assertThat(searchResponse.getHits().getAt(1).id(), Matchers.anyOf(equalTo("c3"), equalTo("c4")));
+ }
+
+ @Test
+ // See also issue:
+ // https://github.com/elasticsearch/elasticsearch/issues/3203
+ public void testHasChildQueryWithMinimumScore() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "x").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "x").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "x").setParent("p2").get();
+ client().prepareIndex("test", "child", "c5").setSource("c_field", "x").setParent("p2").get();
+ refresh();
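+        // With matchAll each matching child scores 1.0, so with scoreType("sum") p2 (three children)
+        // scores 3.0 while p1 (one child) scores 1.0 and is cut off by setMinScore(3).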
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", matchAllQuery()).scoreType("sum"))
+ .setMinScore(3) // Score needs to be 3 or above!
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ }
+
+ @Test
+ public void testParentFieldFilter() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 1)
+ .put("index.refresh_interval", -1))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .addMapping("child2", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
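+        // The _parent field accepts both the bare parent id ("p1") and the fully qualified
+        // "type#id" form ("parent#p1"); both variants are exercised below.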
+ // test term filter
+ SearchResponse response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("_parent", "p1")))
+ .get();
+ assertHitCount(response, 0l);
+
+ client().prepareIndex("test", "some_type", "1").setSource("field", "value").get();
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "value").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "value").setParent("p1").get();
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 0l);
+ refresh();
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("_parent", "parent#p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ client().prepareIndex("test", "parent2", "p1").setSource("p_field", "value").setRefresh(true).get();
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("_parent", "parent#p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ // test terms filter
+ client().prepareIndex("test", "child2", "c1").setSource("c_field", "value").setParent("p1").get();
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("_parent", "parent#p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ refresh();
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 2l);
+
+ refresh();
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("_parent", "p1", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 2l);
+
+ response = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("_parent", "parent#p1", "parent2#p1"))).get();
+ assertHitCount(response, 2l);
+ }
+
+ @Test
+ public void testHasChildNotBeingCached() throws ElasticsearchException, IOException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get();
+ client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get();
+ client().prepareIndex("test", "parent", "p5").setSource("p_field", "p_value5").get();
+ client().prepareIndex("test", "parent", "p6").setSource("p_field", "p_value6").get();
+ client().prepareIndex("test", "parent", "p7").setSource("p_field", "p_value7").get();
+ client().prepareIndex("test", "parent", "p8").setSource("p_field", "p_value8").get();
+ client().prepareIndex("test", "parent", "p9").setSource("p_field", "p_value9").get();
+ client().prepareIndex("test", "parent", "p10").setSource("p_field", "p_value10").get();
+ client().prepareIndex("test", "child", "c1").setParent("p1").setSource("c_field", "blue").get();
+ client().admin().indices().prepareFlush("test").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue")).cache(true)))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
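+        // Index a second matching child; if the has_child filter were served from a stale cache,
+        // the next search would still return 1 hit instead of 2.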
+ client().prepareIndex("test", "child", "c2").setParent("p2").setSource("c_field", "blue").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue")).cache(true)))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ }
+
+ @Test
+ public void testDeleteByQuery_has_child() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 1)
+ .put("index.refresh_interval", "-1")
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().admin().indices().prepareFlush("test").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get();
+ client().admin().indices().prepareFlush("test").get();
+ client().prepareIndex("test", "child", "c5").setSource("c_field", "blue").setParent("p3").get();
+ client().prepareIndex("test", "child", "c6").setSource("c_field", "red").setParent("p3").get();
+ client().admin().indices().prepareRefresh().get();
+ // p4 will not be found via search api, but will be deleted via delete_by_query api!
+ client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get();
+ client().prepareIndex("test", "child", "c7").setSource("c_field", "blue").setParent("p4").get();
+ client().prepareIndex("test", "child", "c8").setSource("c_field", "red").setParent("p4").get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasChild("child", "c_field", "blue"))
+ .get();
+ assertHitCount(searchResponse, 2l);
+
+ client().prepareDeleteByQuery("test").setQuery(randomHasChild("child", "c_field", "blue")).get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasChild("child", "c_field", "blue"))
+ .get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ public void testDeleteByQuery_has_child_SingleRefresh() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 1)
+ .put("index.refresh_interval", "-1")
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get();
+ client().prepareIndex("test", "child", "c5").setSource("c_field", "blue").setParent("p3").get();
+ client().prepareIndex("test", "child", "c6").setSource("c_field", "red").setParent("p3").get();
+ client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get();
+ client().prepareIndex("test", "child", "c7").setSource("c_field", "blue").setParent("p4").get();
+ client().prepareIndex("test", "child", "c8").setSource("c_field", "red").setParent("p4").get();
+ client().admin().indices().prepareRefresh().get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasChild("child", "c_field", "blue"))
+ .get();
+ assertHitCount(searchResponse, 3l);
+
+ client().prepareDeleteByQuery("test").setQuery(randomHasChild("child", "c_field", "blue")).get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasChild("child", "c_field", "blue"))
+ .get();
+ assertHitCount(searchResponse, 0l);
+ }
+
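+    // Randomly picks one of three equivalent has_child variants (constant-score filter, filtered query,
+    // or has_child query) so repeated runs cover all execution paths.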
+ private QueryBuilder randomHasChild(String type, String field, String value) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ return constantScoreQuery(hasChildFilter(type, termQuery(field, value)));
+ } else {
+ return filteredQuery(matchAllQuery(), hasChildFilter(type, termQuery(field, value)));
+ }
+ } else {
+ return hasChildQuery(type, termQuery(field, value));
+ }
+ }
+
+ @Test
+ public void testDeleteByQuery_has_parent() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 1)
+ .put("index.refresh_interval", "-1")
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().admin().indices().prepareFlush("test").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ client().admin().indices().prepareRefresh().get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasParent("parent", "p_field", "p_value2"))
+ .get();
+ assertHitCount(searchResponse, 2l);
+
+ client().prepareDeleteByQuery("test")
+ .setQuery(randomHasParent("parent", "p_field", "p_value2"))
+ .get();
+        client().admin().indices().prepareRefresh("test").get();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasParent("parent", "p_field", "p_value2"))
+ .get();
+ assertHitCount(searchResponse, 0l);
+ }
+
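+    // Same idea as randomHasChild: randomly selects one of the equivalent has_parent variants.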
+ private QueryBuilder randomHasParent(String type, String field, String value) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ return constantScoreQuery(hasParentFilter(type, termQuery(field, value)));
+ } else {
+ return filteredQuery(matchAllQuery(), hasParentFilter(type, termQuery(field, value)));
+ }
+ } else {
+ return hasParentQuery(type, termQuery(field, value));
+ }
+ }
+
+ @Test
+ // Relates to bug: https://github.com/elasticsearch/elasticsearch/issues/3818
+ public void testHasChildQueryOnlyReturnsSingleChildType() {
+ client().admin().indices().prepareCreate("grandissue")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ )
+ .addMapping("grandparent", "name", "type=string")
+ .addMapping("parent", "_parent", "type=grandparent")
+ .addMapping("child_type_one", "_parent", "type=parent")
+ .addMapping("child_type_two", "_parent", "type=parent")
+ .get();
+
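+        // Grandchildren would normally route via their parent id ("2"); explicit routing to the
+        // grandparent ("1") keeps all three generations on the same shard.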
+ client().prepareIndex("grandissue", "grandparent", "1").setSource("name", "Grandpa").get();
+ client().prepareIndex("grandissue", "parent", "2").setParent("1").setSource("name", "Dana").get();
+ client().prepareIndex("grandissue", "child_type_one", "3").setParent("2").setRouting("1")
+ .setSource("name", "William")
+ .get();
+ client().prepareIndex("grandissue", "child_type_two", "4").setParent("2").setRouting("1")
+ .setSource("name", "Kate")
+ .get();
+ client().admin().indices().prepareRefresh("grandissue").get();
+
+ SearchResponse searchResponse = client().prepareSearch("grandissue").setQuery(
+ boolQuery().must(
+ hasChildQuery(
+ "parent",
+ boolQuery().must(
+ hasChildQuery(
+ "child_type_one",
+ boolQuery().must(
+ queryString("name:William*").analyzeWildcard(true)
+ )
+ )
+ )
+ )
+ )
+ ).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("grandissue").setQuery(
+ boolQuery().must(
+ hasChildQuery(
+ "parent",
+ boolQuery().must(
+ hasChildQuery(
+ "child_type_two",
+ boolQuery().must(
+ queryString("name:William*").analyzeWildcard(true)
+ )
+ )
+ )
+ )
+ )
+ ).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ public void indexChildDocWithNoParentMapping() throws ElasticsearchException, IOException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ )
+ .addMapping("parent")
+ .addMapping("child1")
+ .get();
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1", "_parent", "bla").get();
+ try {
+ client().prepareIndex("test", "child1", "c1").setParent("p1").setSource("c_field", "blue").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("Can't specify parent if no parent field has been configured"));
+ }
+ try {
+ client().prepareIndex("test", "child2", "c2").setParent("p1").setSource("c_field", "blue").get();
+ fail();
+ } catch (ElasticsearchIllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("Can't specify parent if no parent field has been configured"));
+ }
+
+ refresh();
+ }
+
+ @Test
+ public void testAddingParentToExistingMapping() throws ElasticsearchException, IOException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).get();
+ ensureGreen();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("child").setSource("number", "type=integer")
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get();
+ Map<String, Object> mapping = getMappingsResponse.getMappings().get("test").get("child").getSourceAsMap();
+ assertThat(mapping.size(), equalTo(1));
+ assertThat(mapping.get("properties"), notNullValue());
+
+ try {
+            // Adding the _parent metadata field to an existing mapping is prohibited:
+ client().admin().indices().preparePutMapping("test").setType("child").setSource(jsonBuilder().startObject().startObject("child")
+ .startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject()).get();
+ fail();
+ } catch (MergeMappingException e) {
+ assertThat(e.getMessage(), equalTo("Merge failed with failures {[The _parent field can't be added or updated]}"));
+ }
+ }
+
+ @Test
+    // The SimpleIdReaderTypeCache#docById method used Trove's stateful lget(), which isn't safe when a map is shared between threads.
+ public void testTopChildrenBug_concurrencyIssue() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c1").setParent("p1").setSource("c_field", "blue").get();
+ client().prepareIndex("test", "child", "c2").setParent("p1").setSource("c_field", "red").get();
+ client().prepareIndex("test", "child", "c3").setParent("p2").setSource("c_field", "red").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ int numThreads = 10;
+ final CountDownLatch latch = new CountDownLatch(numThreads);
+ final AtomicReference<AssertionError> holder = new AtomicReference<AssertionError>();
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ for (int i = 0; i < 100; i++) {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(topChildrenQuery("child", termQuery("c_field", "blue")))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(topChildrenQuery("child", termQuery("c_field", "red")))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ }
+ } catch (AssertionError error) {
+ holder.set(error);
+ } finally {
+ latch.countDown();
+ }
+ }
+ };
+
+        for (int i = 0; i < numThreads; i++) {
+ new Thread(r).start();
+ }
+ latch.await();
+ if (holder.get() != null) {
+ throw holder.get();
+ }
+ }
+
+ @Test
+ public void testHasChildQueryWithNestedInnerObjects() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ )
+ .addMapping("parent", "objects", "type=nested")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "p1")
+ .setSource(jsonBuilder().startObject().field("p_field", "1").startArray("objects")
+ .startObject().field("i_field", "1").endObject()
+ .startObject().field("i_field", "2").endObject()
+ .startObject().field("i_field", "3").endObject()
+ .startObject().field("i_field", "4").endObject()
+ .startObject().field("i_field", "5").endObject()
+ .startObject().field("i_field", "6").endObject()
+ .endArray().endObject())
+ .get();
+ client().prepareIndex("test", "parent", "p2")
+ .setSource(jsonBuilder().startObject().field("p_field", "2").startArray("objects")
+ .startObject().field("i_field", "1").endObject()
+ .startObject().field("i_field", "2").endObject()
+ .endArray().endObject())
+ .get();
+ client().prepareIndex("test", "child", "c1").setParent("p1").setSource("c_field", "blue").get();
+ client().prepareIndex("test", "child", "c2").setParent("p1").setSource("c_field", "red").get();
+ client().prepareIndex("test", "child", "c3").setParent("p2").setSource("c_field", "red").get();
+ refresh();
+
+ String scoreMode = ScoreType.values()[getRandom().nextInt(ScoreType.values().length)].name().toLowerCase(Locale.ROOT);
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(QueryBuilders.hasChildQuery("child", termQuery("c_field", "blue")).scoreType(scoreMode), notFilter(termFilter("p_field", "3"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(QueryBuilders.hasChildQuery("child", termQuery("c_field", "red")).scoreType(scoreMode), notFilter(termFilter("p_field", "3"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ }
+
+ @Test
+ public void testNamedFilters() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent(parentId).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "1")).queryName("test"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max").queryName("test"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score").queryName("test"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "1")).filterName("test")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasParentFilter("parent", termQuery("p_field", "1")).filterName("test")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+ }
+
+ @Test
+ public void testParentChildQueriesNoParentType() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.refresh_interval", -1)
+ .put("index.number_of_replicas", 0))
+ .get();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().admin().indices().prepareRefresh().get();
+
+ try {
+ client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
+ try {
+ client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
+ try {
+ client().prepareSearch("test")
+ .setPostFilter(hasChildFilter("child", termQuery("c_field", "1")))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
+ try {
+ client().prepareSearch("test")
+ .setQuery(topChildrenQuery("child", termQuery("c_field", "1")).score("max"))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
+        // Can't fail, because there is no check: a parent type can be referred to by many child types.
+ client().prepareSearch("test")
+ .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
+ .get();
+ client().prepareSearch("test")
+ .setPostFilter(hasParentFilter("parent", termQuery("p_field", "1")))
+ .get();
+ }
+
+ @Test
+ public void testAdd_ParentFieldAfterIndexingParentDocButBeforeIndexingChildDoc() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.refresh_interval", -1)
+ .put("index.number_of_replicas", 0))
+ .get();
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().admin().indices().prepareRefresh().get();
+ assertAcked(client().admin()
+ .indices()
+ .preparePutMapping("test")
+ .setType("child")
+ .setSource("_parent", "type=parent"));
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent(parentId).get();
+ client().admin().indices().prepareRefresh().get();
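+        // The _parent field was only added to the mapping after the parent doc was indexed; all
+        // parent/child queries and filters below should still resolve the join.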
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, parentId);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, parentId);
+
+
+ searchResponse = client().prepareSearch("test")
+ .setPostFilter(hasChildFilter("child", termQuery("c_field", "1")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, parentId);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(topChildrenQuery("child", termQuery("c_field", "1")).score("max"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, parentId);
+
+ searchResponse = client().prepareSearch("test")
+ .setPostFilter(hasParentFilter("parent", termQuery("p_field", "1")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "c1");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "c1");
+ }
+
+ @Test
+ public void testParentChildCaching() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("index.refresh_interval", -1)
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c1").setParent("p1").setSource("c_field", "blue").get();
+ client().prepareIndex("test", "child", "c2").setParent("p1").setSource("c_field", "red").get();
+ client().prepareIndex("test", "child", "c3").setParent("p2").setSource("c_field", "red").get();
+ client().admin().indices().prepareOptimize("test").setFlush(true).setWaitForMerge(true).get();
+ client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get();
+ client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get();
+ client().prepareIndex("test", "child", "c4").setParent("p3").setSource("c_field", "green").get();
+ client().prepareIndex("test", "child", "c5").setParent("p3").setSource("c_field", "blue").get();
+ client().prepareIndex("test", "child", "c6").setParent("p4").setSource("c_field", "blue").get();
+ client().admin().indices().prepareFlush("test").get();
+ client().admin().indices().prepareRefresh("test").get();
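+        // Execute the same cached filter twice: the first run populates the filter cache, the second should hit it.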
+
+ for (int i = 0; i < 2; i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), boolFilter()
+ .must(FilterBuilders.hasChildFilter("child", matchQuery("c_field", "red")))
+ .must(matchAllFilter())
+ .cache(true)))
+ .get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ }
+
+
+ client().prepareIndex("test", "child", "c3").setParent("p2").setSource("c_field", "blue").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), boolFilter()
+ .must(FilterBuilders.hasChildFilter("child", matchQuery("c_field", "red")))
+ .must(matchAllFilter())
+ .cache(true)))
+ .get();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ @Test
+ public void testParentChildQueriesViaScrollApi() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .get();
+ ensureGreen();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "parent", "p" + i).setSource("{}").get();
+ client().prepareIndex("test", "child", "c" + i).setSource("{}").setParent("p" + i).get();
+ }
+
+ client().admin().indices().prepareRefresh().get();
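+        // Each query variant should return all 10 matching docs when driven through the scan/scroll API.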
+
+ QueryBuilder[] queries = new QueryBuilder[]{
+ hasChildQuery("child", matchAllQuery()),
+ filteredQuery(matchAllQuery(), hasChildFilter("child", matchAllQuery())),
+ hasParentQuery("parent", matchAllQuery()),
+ filteredQuery(matchAllQuery(), hasParentFilter("parent", matchAllQuery())),
+ topChildrenQuery("child", matchAllQuery()).factor(10)
+ };
+
+ for (QueryBuilder query : queries) {
+ SearchResponse scrollResponse = client().prepareSearch("test")
+ .setScroll(TimeValue.timeValueSeconds(30))
+ .setSize(1)
+ .addField("_id")
+ .setQuery(query)
+ .setSearchType("scan")
+ .execute()
+ .actionGet();
+
+ assertThat(scrollResponse.getFailedShards(), equalTo(0));
+ assertThat(scrollResponse.getHits().totalHits(), equalTo(10l));
+
+ int scannedDocs = 0;
+ do {
+ scrollResponse = client()
+ .prepareSearchScroll(scrollResponse.getScrollId())
+ .setScroll(TimeValue.timeValueSeconds(30)).get();
+ assertThat(scrollResponse.getHits().totalHits(), equalTo(10l));
+ scannedDocs += scrollResponse.getHits().getHits().length;
+ } while (scrollResponse.getHits().getHits().length > 0);
+ assertThat(scannedDocs, equalTo(10));
+ }
+ }
+
+ @Test
+ public void testValidateThatHasChildAndHasParentFilterAreNeverCached() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("field", "value")
+ .get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("field", "value")
+ .setRefresh(true)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", matchAllQuery()))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(hasParentQuery("parent", matchAllQuery()))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+        // Internally the has_child and has_parent queries use a filter on the type field, which ends up in the filter cache,
+        // so by first checking how much space that takes when executing the has_child and has_parent *queries* we can set a
+        // baseline for the filter cache size in this test.
+ IndicesStatsResponse statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ long initialCacheSize = statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true)))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true)))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+        // The filter cache should not contain anything, because has_child and has_parent filters can't be cached.
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(initialCacheSize));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.matchAllFilter())
+ .must(FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.matchAllFilter())
+ .must(FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+        // The filter cache should not contain anything, because has_child and has_parent filters can't be cached.
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(initialCacheSize));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.termFilter("field", "value").cache(true))
+ .must(FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.termFilter("field", "value").cache(true))
+ .must(FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+        // This time the cacheable term filter does get cached, so the filter cache should have grown;
+        // the has_child and has_parent filters themselves still aren't cached.
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(initialCacheSize));
+ }
+
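+    // These local helpers shadow the static FilterBuilders/QueryBuilders factories and apply a random
+    // short-circuit cutoff, so the tests exercise both the short-circuited and regular execution paths.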
+ private static HasChildFilterBuilder hasChildFilter(String type, QueryBuilder queryBuilder) {
+ HasChildFilterBuilder hasChildFilterBuilder = FilterBuilders.hasChildFilter(type, queryBuilder);
+ hasChildFilterBuilder.setShortCircuitCutoff(randomInt(10));
+ return hasChildFilterBuilder;
+ }
+
+ private static HasChildFilterBuilder hasChildFilter(String type, FilterBuilder filterBuilder) {
+ HasChildFilterBuilder hasChildFilterBuilder = FilterBuilders.hasChildFilter(type, filterBuilder);
+ hasChildFilterBuilder.setShortCircuitCutoff(randomInt(10));
+ return hasChildFilterBuilder;
+ }
+
+ private static HasChildQueryBuilder hasChildQuery(String type, QueryBuilder queryBuilder) {
+ HasChildQueryBuilder hasChildQueryBuilder = QueryBuilders.hasChildQuery(type, queryBuilder);
+ hasChildQueryBuilder.setShortCircuitCutoff(randomInt(10));
+ return hasChildQueryBuilder;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java b/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java
new file mode 100644
index 0000000..a16acc1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.compress;
+
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.compress.lzf.LZFCompressor;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SearchSourceCompressTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSourceCompressionLZF() throws IOException {
+ CompressorFactory.setDefaultCompressor(new LZFCompressor());
+ verifySource(true);
+ verifySource(false);
+ verifySource(null);
+ }
+
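+    // Indexes documents with increasing _source sizes under the given compress setting (true, false or
+    // unset) and verifies that the stored _source comes back byte-for-byte identical via both get and search.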
+ private void verifySource(Boolean compress) throws IOException {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_source").field("compress", compress).endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ for (int i = 1; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(buildSource(i)).execute().actionGet();
+ }
+ client().prepareIndex("test", "type1", Integer.toString(10000)).setSource(buildSource(10000)).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 1; i < 100; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
+ assertThat(getResponse.getSourceAsBytes(), equalTo(buildSource(i).bytes().toBytes()));
+ }
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(10000)).execute().actionGet();
+ assertThat(getResponse.getSourceAsBytes(), equalTo(buildSource(10000).bytes().toBytes()));
+
+ for (int i = 1; i < 100; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.idsQuery("type1").ids(Integer.toString(i))).execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).source(), equalTo(buildSource(i).bytes().toBytes()));
+ }
+ }
+
+ private XContentBuilder buildSource(int count) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ StringBuilder sb = new StringBuilder();
+ for (int j = 0; j < count; j++) {
+ sb.append("value").append(j).append(' ');
+ }
+ builder.field("field", sb.toString());
+ return builder.endObject();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/customscore/CustomScoreSearchTests.java b/src/test/java/org/elasticsearch/search/customscore/CustomScoreSearchTests.java
new file mode 100644
index 0000000..e1ea9e0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/customscore/CustomScoreSearchTests.java
@@ -0,0 +1,1048 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.customscore;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.factorFunction;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class CustomScoreSearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testScoreExplainBug_2283() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test").setWaitForYellowStatus().execute()
+ .actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "color", "blue").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", "value3", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field", "value4", "color", "blue").execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
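+        // With scoreMode("first") only the first matching filter contributes: doc "2" matches the
+        // "value2" filter with boost 3, so its score (and explanation value) must be 3.0.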
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).add(termFilter("field", "value4"), "2")
+ .add(termFilter("field", "value2"), "3").scoreMode("first")).setExplain(true).execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ logger.info("--> Hit[0] {} Explanation:\n {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0)
+ .explanation());
+ Explanation explanation = searchResponse.getHits().getAt(0).explanation();
+ assertNotNull(explanation);
+ assertThat(explanation.isMatch(), equalTo(true));
+ assertThat(explanation.getValue(), equalTo(3f));
+ assertThat(explanation.getDescription(), equalTo("function score, product of:"));
+
+ assertThat(explanation.getDetails().length, equalTo(3));
+ assertThat(explanation.getDetails()[0].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));
+ assertThat(explanation.getDetails()[0].getDetails().length, equalTo(2));
+ assertThat(explanation.getDetails()[1].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[1].getValue(), equalTo(3f));
+ assertThat(explanation.getDetails()[1].getDetails().length, equalTo(2));
+
+ // Same query but with boost
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).add(termFilter("field", "value4"), "2")
+ .add(termFilter("field", "value2"), "3").boost(2).scoreMode("first")).setExplain(true).execute()
+ .actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(6f));
+ logger.info("--> Hit[0] {} Explanation:\n {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0)
+ .explanation());
+ explanation = searchResponse.getHits().getAt(0).explanation();
+ assertNotNull(explanation);
+ assertThat(explanation.isMatch(), equalTo(true));
+ assertThat(explanation.getValue(), equalTo(6f));
+ assertThat(explanation.getDescription(), equalTo("function score, product of:"));
+
+ assertThat(explanation.getDetails().length, equalTo(3));
+ assertThat(explanation.getDetails()[0].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));
+ assertThat(explanation.getDetails()[0].getDetails().length, equalTo(2));
+ assertThat(explanation.getDetails()[1].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[1].getValue(), equalTo(3f));
+ assertThat(explanation.getDetails()[1].getDetails().length, equalTo(2));
+ assertThat(explanation.getDetails()[2].getDescription(), equalTo("queryBoost"));
+ assertThat(explanation.getDetails()[2].getValue(), equalTo(2f));
+ }
+
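+ // Same regression scenario as above, expressed with the newer
+ // function_score query and script functions instead of custom_filters_score.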
+ @Test
+ public void testScoreExplainBug_2283_withFunctionScore() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test").setWaitForYellowStatus().execute()
+ .actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "color", "blue").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", "value3", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field", "value4", "color", "blue").execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("first").add(termFilter("field", "value4"), scriptFunction("2"))
+ .add(termFilter("field", "value2"), scriptFunction("3"))).setExplain(true).execute().actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ logger.info("--> Hit[0] {} Explanation:\n {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0)
+ .explanation());
+ Explanation explanation = searchResponse.getHits().getAt(0).explanation();
+ assertNotNull(explanation);
+ assertThat(explanation.isMatch(), equalTo(true));
+ assertThat(explanation.getValue(), equalTo(3f));
+ assertThat(explanation.getDescription(), equalTo("function score, product of:"));
+ assertThat(explanation.getDetails().length, equalTo(3));
+ assertThat(explanation.getDetails()[0].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));
+ assertThat(explanation.getDetails()[0].getDetails().length, equalTo(2));
+ assertThat(explanation.getDetails()[1].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[1].getValue(), equalTo(3f));
+ assertThat(explanation.getDetails()[1].getDetails().length, equalTo(2));
+
+ // Same query but with boost
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("first").add(termFilter("field", "value4"), scriptFunction("2"))
+ .add(termFilter("field", "value2"), scriptFunction("3")).boost(2)).setExplain(true).execute().actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(6f));
+ logger.info("--> Hit[0] {} Explanation:\n {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0)
+ .explanation());
+ explanation = searchResponse.getHits().getAt(0).explanation();
+ assertNotNull(explanation);
+ assertThat(explanation.isMatch(), equalTo(true));
+ assertThat(explanation.getValue(), equalTo(6f));
+ assertThat(explanation.getDescription(), equalTo("function score, product of:"));
+
+ assertThat(explanation.getDetails().length, equalTo(3));
+ assertThat(explanation.getDetails()[0].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));
+ assertThat(explanation.getDetails()[0].getDetails().length, equalTo(2));
+ assertThat(explanation.getDetails()[1].isMatch(), equalTo(true));
+ assertThat(explanation.getDetails()[1].getValue(), equalTo(3f));
+ assertThat(explanation.getDetails()[1].getDetails().length, equalTo(2));
+ assertThat(explanation.getDetails()[2].getDescription(), equalTo("queryBoost"));
+ assertThat(explanation.getDetails()[2].getValue(), equalTo(2f));
+ }
+
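+ // Verifies that custom_score scripts can iterate over multi-valued
+ // string, long, double, and geo_point fields; the document with the
+ // larger minimum value must always rank first.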
+ @Test
+ public void testMultiValueCustomScriptBoost() throws ElasticsearchException, IOException {
+
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("snum")
+ .field("type", "string").endObject().startObject("dnum").field("type", "double").endObject()
+ .startObject("lnum").field("type", "long").endObject().startObject("gp").field("type", "geo_point")
+ .endObject().endObject().endObject().endObject()).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ String[] values = new String[100];
+ String[] gp = new String[100];
+
+ long[] lValues = new long[100];
+ double[] dValues = new double[100];
+ int offset = 1;
+ for (int i = 0; i < values.length; i++) {
+ values[i] = "" + (i + offset);
+ gp[i] = "" + (i + offset) + "," + (i + offset);
+ lValues[i] = (i + offset);
+ dValues[i] = (i + offset);
+ }
+ client().index(
+ indexRequest("test")
+ .type("type1")
+ .id("1")
+ .source(jsonBuilder().startObject().field("test", "value check").field("snum", values).field("dnum", dValues)
+ .field("lnum", lValues).field("gp", gp).endObject())).actionGet();
+ offset++;
+ for (int i = 0; i < values.length; i++) {
+ values[i] = "" + (i + offset);
+ gp[i] = "" + (i + offset) + "," + (i + offset);
+ lValues[i] = (i + offset);
+ dValues[i] = (i + offset);
+ }
+ client().index(
+ indexRequest("test")
+ .type("type1")
+ .id("2")
+ .source(jsonBuilder().startObject().field("test", "value check").field("snum", values).field("dnum", dValues)
+ .field("lnum", lValues).field("gp", gp).endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("running min(doc['num1'].value)");
+ SearchResponse response = client()
+ .search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource()
+ .explain(true)
+ .query(customScoreQuery(termQuery("test", "value"))
+ .script("c_min = 1000; foreach (x : doc['snum'].values) { c_min = min(Integer.parseInt(x), c_min) } return c_min"))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termQuery("test", "value")).script(
+ "c_min = 1000; foreach (x : doc['lnum'].values) { c_min = min(x, c_min) } return c_min"))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termQuery("test", "value")).script(
+ "c_min = 1000; foreach (x : doc['dnum'].values) { c_min = min(x, c_min) } return c_min"))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termQuery("test", "value")).script(
+ "c_min = 1000; foreach (x : doc['gp'].values) { c_min = min(x.lat, c_min) } return c_min"))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+ }
+
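+ // function_score counterpart of the multi-value script test above:
+ // the same scripts are passed as script functions instead of a
+ // custom_score script.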
+ @Test
+ public void testMultiValueCustomScriptBoost_withFunctionScore() throws ElasticsearchException, IOException {
+
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("snum")
+ .field("type", "string").endObject().startObject("dnum").field("type", "double").endObject()
+ .startObject("lnum").field("type", "long").endObject().startObject("gp").field("type", "geo_point")
+ .endObject().endObject().endObject().endObject()).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ String[] values = new String[100];
+ String[] gp = new String[100];
+
+ long[] lValues = new long[100];
+ double[] dValues = new double[100];
+ int offset = 1;
+ for (int i = 0; i < values.length; i++) {
+ values[i] = "" + (i + offset);
+ gp[i] = "" + (i + offset) + "," + (i + offset);
+ lValues[i] = (i + offset);
+ dValues[i] = (i + offset);
+ }
+ client().index(
+ indexRequest("test")
+ .type("type1")
+ .id("1")
+ .source(jsonBuilder().startObject().field("test", "value check").field("snum", values).field("dnum", dValues)
+ .field("lnum", lValues).field("gp", gp).endObject())).actionGet();
+ offset++;
+ for (int i = 0; i < values.length; i++) {
+ values[i] = "" + (i + offset);
+ gp[i] = "" + (i + offset) + "," + (i + offset);
+ lValues[i] = (i + offset);
+ dValues[i] = (i + offset);
+ }
+ client().index(
+ indexRequest("test")
+ .type("type1")
+ .id("2")
+ .source(jsonBuilder().startObject().field("test", "value check").field("snum", values).field("dnum", dValues)
+ .field("lnum", lValues).field("gp", gp).endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("running min(doc['num1'].value)");
+ SearchResponse response = client()
+ .search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource()
+ .explain(true)
+ .query(functionScoreQuery(
+ termQuery("test", "value"),
+ scriptFunction("c_min = 1000; foreach (x : doc['snum'].values) { c_min = min(Integer.parseInt(x), c_min) } return c_min")))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ response = client()
+ .search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource()
+ .explain(true)
+ .query(functionScoreQuery(
+ termQuery("test", "value"),
+ scriptFunction("c_min = 1000; foreach (x : doc['lnum'].values) { c_min = min(x, c_min) } return c_min")))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ response = client()
+ .search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource()
+ .explain(true)
+ .query(functionScoreQuery(
+ termQuery("test", "value"),
+ scriptFunction("c_min = 1000; foreach (x : doc['dnum'].values) { c_min = min(x, c_min) } return c_min")))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ response = client()
+ .search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource()
+ .explain(true)
+ .query(functionScoreQuery(
+ termQuery("test", "value"),
+ scriptFunction("c_min = 1000; foreach (x : doc['gp'].values) { c_min = min(x.lat, c_min) } return c_min")))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+ }
+
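+ // Exercises custom_score with a range of simple script expressions
+ // (field access, negation, pow/max, _score access, and parameters)
+ // and checks the resulting hit order.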
+ @Test
+ public void testCustomScriptBoost() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value check").field("num1", 2.0f).endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("--- QUERY_THEN_FETCH");
+
+ logger.info("running doc['num1'].value");
+ SearchResponse response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(customScoreQuery(termQuery("test", "value")).script("doc['num1'].value"))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running -doc['num1'].value");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(customScoreQuery(termQuery("test", "value")).script("-doc['num1'].value"))))
+ .actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+
+ logger.info("running pow(doc['num1'].value, 2)");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true)
+ .query(customScoreQuery(termQuery("test", "value")).script("pow(doc['num1'].value, 2)")))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running max(doc['num1'].value, 1)");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termQuery("test", "value")).script("max(doc['num1'].value, 1d)")))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running doc['num1'].value * _score");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termQuery("test", "value")).script("doc['num1'].value * _score")))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running param1 * param2 * _score");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termQuery("test", "value")).script("param1 * param2 * _score").param("param1", 2)
+ .param("param2", 2)))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertSearchHits(response, "1", "2");
+
+ logger.info("running param1 * param2 * _score with filter instead of query");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ customScoreQuery(termFilter("test", "value")).script("param1 * param2 * _score").param("param1", 2)
+ .param("param2", 2)))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertSearchHits(response, "1", "2");
+ assertThat(response.getHits().getAt(0).score(), equalTo(4f)); // _score is always 1
+ assertThat(response.getHits().getAt(1).score(), equalTo(4f)); // _score is always 1
+ }
+
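+ // function_score counterpart of testCustomScriptBoost, passing each of
+ // the same expressions through scriptFunction(...).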
+ @Test
+ public void testCustomScriptBoost_withFunctionScore() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value check").field("num1", 2.0f).endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("--- QUERY_THEN_FETCH");
+
+ logger.info("running doc['num1'].value");
+ SearchResponse response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), scriptFunction("doc['num1'].value"))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running -doc['num1'].value");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), scriptFunction("-doc['num1'].value"))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+
+ logger.info("running pow(doc['num1'].value, 2)");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), scriptFunction("pow(doc['num1'].value, 2)"))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running max(doc['num1'].value, 1)");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), scriptFunction("max(doc['num1'].value, 1d)"))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running doc['num1'].value * _score");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), scriptFunction("doc['num1'].value * _score"))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).id(), equalTo("1"));
+
+ logger.info("running param1 * param2 * _score");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), scriptFunction("param1 * param2 * _score")
+ .param("param1", 2).param("param2", 2))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertSearchHits(response, "1", "2");
+
+ logger.info("running param1 * param2 * _score with filter instead of query");
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termFilter("test", "value"),
+ scriptFunction("param1 * param2 * _score").param("param1", 2).param("param2", 2))))).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());
+ assertSearchHits(response, "1", "2");
+ assertThat(response.getHits().getAt(0).score(), equalTo(4f)); // _score is always 1
+ assertThat(response.getHits().getAt(1).score(), equalTo(4f)); // _score is always 1
+ }
+
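+ // A fuzzy query wrapped in custom_filters_score is intended to trigger
+ // Lucene's BooleanScorer code path (as the test name suggests); the
+ // test only checks that the query executes without shard failures.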
+ @Test
+ public void testTriggerBooleanScorer() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "color", "blue").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", "value3", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field", "value4", "color", "blue").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(customFiltersScoreQuery(fuzzyQuery("field", "value")).add(FilterBuilders.idsFilter("type").addIds("1"), 3))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ }
+
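+ // Same BooleanScorer trigger as above, with function_score and a
+ // boost factor function.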
+ @Test
+ public void testTriggerBooleanScorer_withFunctionScore() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "color", "blue").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", "value3", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field", "value4", "color", "blue").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(fuzzyQuery("field", "value")).add(FilterBuilders.idsFilter("type").addIds("1"),
+ factorFunction(3))).execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ }
+
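+ // Walks custom_filters_score through its score modes (default "first",
+ // total, max, avg, min, multiply). The first request corresponds roughly
+ // to this body (hand-written sketch, not captured output):
+ // { "query": { "custom_filters_score": {
+ //     "query": { "match_all": {} },
+ //     "filters": [
+ //       { "filter": { "term": { "field": "value4" } }, "script": "2" },
+ //       { "filter": { "term": { "field": "value2" } }, "script": "3" } ] } } }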
+ @Test
+ public void testCustomFiltersScore() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "color", "blue").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", "value3", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field", "value4", "color", "blue").execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).add(termFilter("field", "value4"), "2").add(termFilter("field", "value2"),
+ "3")).setExplain(true).execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(1.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).add(termFilter("field", "value4"), 2)
+ .add(termFilter("field", "value2"), 3)).setExplain(true).execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(1.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).scoreMode("total").add(termFilter("field", "value4"), 2)
+ .add(termFilter("field", "value1"), 3).add(termFilter("color", "red"), 5)).setExplain(true).execute()
+ .actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(8.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).scoreMode("max").add(termFilter("field", "value4"), 2)
+ .add(termFilter("field", "value1"), 3).add(termFilter("color", "red"), 5)).setExplain(true).execute()
+ .actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("1"), equalTo("3"))); // could be either, depending on internal (lucene) doc order
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).scoreMode("avg").add(termFilter("field", "value4"), 2)
+ .add(termFilter("field", "value1"), 3).add(termFilter("color", "red"), 5)).setExplain(true).execute()
+ .actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(4.0f));
+ logger.info("--> Hit[1] {} Explanation {}", searchResponse.getHits().getAt(1).id(), searchResponse.getHits().getAt(1).explanation());
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).scoreMode("min").add(termFilter("field", "value4"), 2)
+ .add(termFilter("field", "value1"), 3).add(termFilter("color", "red"), 5)).setExplain(true).execute()
+ .actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(3.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(matchAllQuery()).scoreMode("multiply").add(termFilter("field", "value4"), 2)
+ .add(termFilter("field", "value1"), 3).add(termFilter("color", "red"), 5)).setExplain(true).execute()
+ .actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(15.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(5.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(termsQuery("field", "value1", "value2", "value3", "value4")).scoreMode("first")
+ .add(termFilter("field", "value4"), 2).add(termFilter("field", "value3"), 3)
+ .add(termFilter("field", "value2"), 4)).setExplain(true).execute().actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(searchResponse.getHits().getAt(0).explanation().getValue()));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(searchResponse.getHits().getAt(1).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(searchResponse.getHits().getAt(2).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(searchResponse.getHits().getAt(3).explanation().getValue()));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ customFiltersScoreQuery(termsQuery("field", "value1", "value2", "value3", "value4")).scoreMode("multiply")
+ .add(termFilter("field", "value4"), 2).add(termFilter("field", "value1"), 3)
+ .add(termFilter("color", "red"), 5)).setExplain(true).execute().actionGet();
+
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(searchResponse.getHits().getAt(0).explanation().getValue()));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(searchResponse.getHits().getAt(1).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(searchResponse.getHits().getAt(2).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(searchResponse.getHits().getAt(3).explanation().getValue()));
+ }
+
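+ // Repeats the score-mode matrix above with function_score, mapping
+ // script boosts to scriptFunction(...) / factorFunction(...) and the
+ // custom_filters_score "total" mode to function_score's "sum".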
+ @Test
+ public void testCustomFiltersScore_withFunctionScore() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute()
+ .actionGet();
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "color", "blue").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", "value3", "color", "red").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field", "value4", "color", "blue").execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery())
+ .add(termFilter("field", "value4"), scriptFunction("2")).add(
+ termFilter("field", "value2"), scriptFunction("3"))).setExplain(true)
+ .execute().actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(1.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).add(termFilter("field", "value4"), factorFunction(2)).add(
+ termFilter("field", "value2"), factorFunction(3))).setExplain(true).execute().actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(1.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), anyOf(equalTo("1"), equalTo("3")));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("sum")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value1"), factorFunction(3))
+ .add(termFilter("color", "red"), factorFunction(5))).setExplain(true).execute()
+ .actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(8.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("max")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value1"), factorFunction(3))
+ .add(termFilter("color", "red"), factorFunction(5))).setExplain(true).execute()
+ .actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("1"), equalTo("3"))); // could be either, depending on internal (lucene) doc order
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("avg")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value1"), factorFunction(3))
+ .add(termFilter("color", "red"), factorFunction(5))).setExplain(true).execute()
+ .actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(4.0f));
+ logger.info("--> Hit[1] {} Explanation {}", searchResponse.getHits().getAt(1).id(), searchResponse.getHits().getAt(1).explanation());
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("min")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value1"), factorFunction(3))
+ .add(termFilter("color", "red"), factorFunction(5))).setExplain(true).execute()
+ .actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(3.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).scoreMode("multiply")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value1"), factorFunction(3))
+ .add(termFilter("color", "red"), factorFunction(5))).setExplain(true).execute()
+ .actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(15.0f));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(5.0f));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(2.0f));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(termsQuery("field", "value1", "value2", "value3", "value4")).scoreMode("first")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value3"), factorFunction(3))
+ .add(termFilter("field", "value2"), factorFunction(4))).setExplain(true).execute()
+ .actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(searchResponse.getHits().getAt(0).explanation().getValue()));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(searchResponse.getHits().getAt(1).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(searchResponse.getHits().getAt(2).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(searchResponse.getHits().getAt(3).explanation().getValue()));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(termsQuery("field", "value1", "value2", "value3", "value4")).scoreMode("multiply")
+ .add(termFilter("field", "value4"), factorFunction(2))
+ .add(termFilter("field", "value1"), factorFunction(3))
+ .add(termFilter("color", "red"), factorFunction(5))).setExplain(true).execute()
+ .actionGet();
+
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(searchResponse.getHits().getAt(0).explanation().getValue()));
+ logger.info("--> Hit[0] {} Explanation {}", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).score(), equalTo(searchResponse.getHits().getAt(1).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("4"));
+ assertThat(searchResponse.getHits().getAt(2).score(), equalTo(searchResponse.getHits().getAt(2).explanation().getValue()));
+ assertThat(searchResponse.getHits().getAt(3).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(3).score(), equalTo(searchResponse.getHits().getAt(3).explanation().getValue()));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/facet/ConcurrentDuel.java b/src/test/java/org/elasticsearch/search/facet/ConcurrentDuel.java
new file mode 100644
index 0000000..0ad73d3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/ConcurrentDuel.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicLong;
+
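+/**
+ * Test helper that executes a task once on the calling thread and then
+ * concurrently on a fixed-size pool, so that a judge can compare every
+ * concurrent result against the single-threaded baseline.
+ */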
+public class ConcurrentDuel<T> {
+
+
+ private final ExecutorService pool;
+ private final int numExecutorThreads;
+
+ public ConcurrentDuel(int numThreads) {
+ pool = Executors.newFixedThreadPool(numThreads);
+ this.numExecutorThreads = numThreads;
+ }
+
+ public void close() {
+ pool.shutdown();
+ }
+
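+ // Runs the executor once up front, then lets the submitted tasks drain a
+ // shared iteration counter; the latch releases all tasks at once so the
+ // runs overlap as much as possible.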
+ public List<T> runDuel(final DuelExecutor<T> executor, int iterations, int numTasks) throws InterruptedException, ExecutionException {
+ List<T> results = new ArrayList<T>();
+ T firstRun = executor.run();
+ results.add(firstRun);
+ for (int i = 0; i < 3; i++) {
+
+ }
+ final CountDownLatch latch = new CountDownLatch(1);
+ final AtomicLong count = new AtomicLong(iterations);
+ List<Future<List<T>>> futures = new ArrayList<Future<List<T>>>();
+ for (int i = 0; i < numTasks; i++) {
+ futures.add(pool.submit(new Callable<List<T>>() {
+
+ @Override
+ public List<T> call() throws Exception {
+ List<T> results = new ArrayList<T>();
+ latch.await();
+ while(count.decrementAndGet() >= 0) {
+ results.add(executor.run());
+ }
+ return results;
+ }
+ }));
+ }
+ latch.countDown();
+ for (Future<List<T>> future : futures) {
+ results.addAll(future.get());
+ }
+ return results;
+ }
+
+ public void duel(DuelJudge<T> judge, final DuelExecutor<T> executor, int iterations) throws InterruptedException, ExecutionException {
+ duel(judge, executor, iterations, numExecutorThreads);
+ }
+
+ public void duel(DuelJudge<T> judge, final DuelExecutor<T> executor, int iterations, int threadCount) throws InterruptedException, ExecutionException {
+ T firstRun = executor.run();
+ List<T> runDuel = runDuel(executor, iterations, threadCount);
+ for (T t : runDuel) {
+ judge.judge(firstRun, t);
+ }
+ }
+
+ public interface DuelExecutor<T> {
+ T run();
+ }
+
+ public interface DuelJudge<T> {
+ void judge(T firstRun, T result);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTests.java b/src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTests.java
new file mode 100644
index 0000000..6e9ceb7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTests.java
@@ -0,0 +1,355 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.facet.terms.TermsFacetBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.*;
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Randomized test that compares terms facet results on differently
+ * configured string fields against counts tracked in memory at index time.
+ */
+public class ExtendedFacetsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", numberOfShards())
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
+ protected int numberOfShards() {
+ return 1;
+ }
+
+ protected int numDocs() {
+ return 2500;
+ }
+
+
+ @Test
+ @Slow
+ public void testTermFacet_stringFields() throws Throwable {
+ prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("field1_paged")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fielddata")
+ .field("format", "paged_bytes")
+ .field("loading", randomBoolean() ? "eager" : "lazy")
+ .endObject()
+ .endObject()
+ .startObject("field1_fst")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fielddata")
+ .field("format", "fst")
+ .field("loading", randomBoolean() ? "eager" : "lazy")
+ .endObject()
+ .endObject()
+ .startObject("field1_dv")
+ .field("type", "string")
+ .field("index", randomBoolean() ? "no" : "not_analyzed")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("field2")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fielddata")
+ .field("format", "fst")
+ .field("loading", randomBoolean() ? "eager" : "lazy")
+ .endObject()
+ .endObject()
+ .startObject("q_field")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ )
+ .execute().actionGet();
+
+
+ Random random = getRandom();
+ int numOfQueryValues = 50;
+ String[] queryValues = new String[numOfQueryValues];
+ for (int i = 0; i < numOfQueryValues; i++) {
+ queryValues[i] = randomAsciiOfLength(5);
+ }
+
+ Set<String> uniqueValuesSet = new HashSet<String>();
+ int numOfVals = 400;
+ for (int i = 0; i < numOfVals; i++) {
+ uniqueValuesSet.add(randomAsciiOfLength(10));
+ }
+ String[] allUniqueFieldValues = uniqueValuesSet.toArray(new String[uniqueValuesSet.size()]);
+
+ Set<String> allField1Values = new HashSet<String>();
+ Set<String> allField1AndField2Values = new HashSet<String>();
+ Map<String, Map<String, Integer>> queryValToField1FacetEntries = new HashMap<String, Map<String, Integer>>();
+ Map<String, Map<String, Integer>> queryValToField1and2FacetEntries = new HashMap<String, Map<String, Integer>>();
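+ // every indexed document also feeds the in-memory control maps that the
+ // facet responses are later checked against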
+ for (int i = 1; i <= numDocs(); i++) {
+ int numField1Values = random.nextInt(17);
+ Set<String> field1Values = new HashSet<String>(numField1Values);
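+ // j <= numField1Values, so each document gets between 1 and 17 unique values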
+ for (int j = 0; j <= numField1Values; j++) {
+ boolean added = false;
+ while (!added) {
+ added = field1Values.add(allUniqueFieldValues[random.nextInt(numOfVals)]);
+ }
+ }
+ allField1Values.addAll(field1Values);
+ allField1AndField2Values.addAll(field1Values);
+ String field2Val = allUniqueFieldValues[random.nextInt(numOfVals)];
+ allField1AndField2Values.add(field2Val);
+ String queryVal = queryValues[random.nextInt(numOfQueryValues)];
+ client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(jsonBuilder().startObject()
+ .field("field1_paged", field1Values)
+ .field("field1_fst", field1Values)
+ .field("field1_dv", field1Values)
+ .field("field2", field2Val)
+ .field("q_field", queryVal)
+ .endObject())
+ .execute().actionGet();
+
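+ // flush for roughly one in 2000 documents, presumably to spread the index over several segments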
+ if (random.nextInt(2000) == 854) {
+ client().admin().indices().prepareFlush("test").execute().actionGet();
+ }
+ addControlValues(queryValToField1FacetEntries, field1Values, queryVal);
+ addControlValues(queryValToField1and2FacetEntries, field1Values, queryVal);
+ addControlValues(queryValToField1and2FacetEntries, field2Val, queryVal);
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ String[] facetFields = new String[]{"field1_paged", "field1_fst", "field1_dv"};
+ TermsFacet.ComparatorType[] compTypes = TermsFacet.ComparatorType.values();
+ for (String facetField : facetFields) {
+ for (String queryVal : queryValToField1FacetEntries.keySet()) {
+ Set<String> allFieldValues;
+ Map<String, Integer> queryControlFacets;
+ TermsFacet.ComparatorType compType = compTypes[random.nextInt(compTypes.length)];
+ TermsFacetBuilder termsFacetBuilder = FacetBuilders.termsFacet("facet1").order(compType);
+
+ boolean useFields;
+ if (random.nextInt(4) == 3) {
+ useFields = true;
+ queryControlFacets = queryValToField1and2FacetEntries.get(queryVal);
+ allFieldValues = allField1AndField2Values;
+ termsFacetBuilder.fields(facetField, "field2");
+ } else {
+ queryControlFacets = queryValToField1FacetEntries.get(queryVal);
+ allFieldValues = allField1Values;
+ useFields = false;
+ termsFacetBuilder.field(facetField);
+ }
+ int size;
+ if (numberOfShards() == 1 || compType == TermsFacet.ComparatorType.TERM || compType == TermsFacet.ComparatorType.REVERSE_TERM) {
+ size = random.nextInt(queryControlFacets.size());
+ } else {
+ size = allFieldValues.size();
+ }
+ termsFacetBuilder.size(size);
+
+ if (random.nextBoolean()) {
+ termsFacetBuilder.executionHint("map");
+ }
+ List<String> excludes = new ArrayList<String>();
+ if (random.nextBoolean()) {
+ int numExcludes = random.nextInt(5) + 1;
+ List<String> facetValues = new ArrayList<String>(queryControlFacets.keySet());
+ for (int i = 0; i < numExcludes; i++) {
+ excludes.add(facetValues.get(random.nextInt(facetValues.size())));
+ }
+ termsFacetBuilder.exclude(excludes.toArray());
+ }
+ String regex = null;
+ if (random.nextBoolean()) {
+ List<String> facetValues = new ArrayList<String>(queryControlFacets.keySet());
+ regex = facetValues.get(random.nextInt(facetValues.size()));
+ regex = "^" + regex.substring(0, regex.length() / 2) + ".*";
+ termsFacetBuilder.regex(regex);
+ }
+
+ boolean allTerms = random.nextInt(10) == 3;
+ termsFacetBuilder.allTerms(allTerms);
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.termQuery("q_field", queryVal))
+ .addFacet(termsFacetBuilder)
+ .execute().actionGet();
+ TermsFacet actualFacetEntries = response.getFacets().facet("facet1");
+
+ List<Tuple<Text, Integer>> expectedFacetEntries = getExpectedFacetEntries(allFieldValues, queryControlFacets, size, compType, excludes, regex, allTerms);
+ String reason = String.format(Locale.ROOT, "query: [%s] field: [%s] size: [%d] order: [%s] all_terms: [%s] fields: [%s] regex: [%s] excludes: [%s]", queryVal, facetField, size, compType, allTerms, useFields, regex, excludes);
+ assertThat(reason, actualFacetEntries.getEntries().size(), equalTo(expectedFacetEntries.size()));
+ for (int i = 0; i < expectedFacetEntries.size(); i++) {
+ assertThat(reason, actualFacetEntries.getEntries().get(i).getTerm(), equalTo(expectedFacetEntries.get(i).v1()));
+ assertThat(reason, actualFacetEntries.getEntries().get(i).getCount(), equalTo(expectedFacetEntries.get(i).v2()));
+ }
+ }
+ }
+ }
+
+ private void addControlValues(Map<String, Map<String, Integer>> queryValToFacetFieldEntries, String fieldVal, String queryVal) {
+ Map<String, Integer> controlFieldFacets = queryValToFacetFieldEntries.get(queryVal);
+ if (controlFieldFacets == null) {
+ controlFieldFacets = new HashMap<String, Integer>();
+ queryValToFacetFieldEntries.put(queryVal, controlFieldFacets);
+ }
+ Integer controlCount = controlFieldFacets.get(fieldVal);
+ if (controlCount == null) {
+ controlCount = 0;
+ }
+ controlFieldFacets.put(fieldVal, ++controlCount);
+ }
+
+ private void addControlValues(Map<String, Map<String, Integer>> queryValToFacetFieldEntries, Set<String> fieldValues, String queryVal) {
+ for (String fieldValue : fieldValues) {
+ addControlValues(queryValToFacetFieldEntries, fieldValue, queryVal);
+ }
+ }
+
+ private List<Tuple<Text, Integer>> getExpectedFacetEntries(Set<String> fieldValues,
+ Map<String, Integer> controlFacetsField,
+ int size,
+ TermsFacet.ComparatorType sort,
+ List<String> excludes,
+ String regex,
+ boolean allTerms) {
+ Pattern pattern = null;
+ if (regex != null) {
+ pattern = Regex.compile(regex, null);
+ }
+
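+ // apply the same exclude and regex filtering the terms facet applies server-side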
+ List<Tuple<Text, Integer>> entries = new ArrayList<Tuple<Text, Integer>>();
+ for (Map.Entry<String, Integer> e : controlFacetsField.entrySet()) {
+ if (excludes.contains(e.getKey())) {
+ continue;
+ }
+ if (pattern != null && !pattern.matcher(e.getKey()).matches()) {
+ continue;
+ }
+
+ entries.add(new Tuple<Text, Integer>(new StringText(e.getKey()), e.getValue()));
+ }
+
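+ // with all_terms enabled, values that never matched still appear with a zero count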
+ if (allTerms) {
+ for (String fieldValue : fieldValues) {
+ if (!controlFacetsField.containsKey(fieldValue)) {
+ if (excludes.contains(fieldValue)) {
+ continue;
+ }
+ if (pattern != null && !pattern.matcher(fieldValue).matches()) {
+ continue;
+ }
+
+ entries.add(new Tuple<Text, Integer>(new StringText(fieldValue), 0));
+ }
+ }
+ }
+
+ switch (sort) {
+ case COUNT:
+ Collections.sort(entries, count);
+ break;
+ case REVERSE_COUNT:
+ Collections.sort(entries, count_reverse);
+ break;
+ case TERM:
+ Collections.sort(entries, term);
+ break;
+ case REVERSE_TERM:
+ Collections.sort(entries, term_reverse);
+ break;
+ }
+ return size >= entries.size() ? entries : entries.subList(0, size);
+ }
+
+ private final static Count count = new Count();
+ private final static CountReverse count_reverse = new CountReverse();
+ private final static Term term = new Term();
+ private final static TermReverse term_reverse = new TermReverse();
+
+ private static class Count implements Comparator<Tuple<Text, Integer>> {
+
+ @Override
+ public int compare(Tuple<Text, Integer> o1, Tuple<Text, Integer> o2) {
+ int cmp = Integer.compare(o2.v2(), o1.v2());
+ if (cmp != 0) {
+ return cmp;
+ }
+ cmp = o2.v1().compareTo(o1.v1());
+ if (cmp != 0) {
+ return cmp;
+ }
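+ // identity hash as a last resort keeps the ordering total for fully equal entries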
+ return System.identityHashCode(o2) - System.identityHashCode(o1);
+ }
+
+ }
+
+ private static class CountReverse implements Comparator<Tuple<Text, Integer>> {
+
+ @Override
+ public int compare(Tuple<Text, Integer> o1, Tuple<Text, Integer> o2) {
+ return -count.compare(o1, o2);
+ }
+
+ }
+
+ private static class Term implements Comparator<Tuple<Text, Integer>> {
+
+ @Override
+ public int compare(Tuple<Text, Integer> o1, Tuple<Text, Integer> o2) {
+ return o1.v1().compareTo(o2.v1());
+ }
+
+ }
+
+ private static class TermReverse implements Comparator<Tuple<Text, Integer>> {
+
+ @Override
+ public int compare(Tuple<Text, Integer> o1, Tuple<Text, Integer> o2) {
+ return -term.compare(o1, o2);
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTestsMultiShardMultiNodeTests.java b/src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTestsMultiShardMultiNodeTests.java
new file mode 100644
index 0000000..88faf06
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/ExtendedFacetsTestsMultiShardMultiNodeTests.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+/**
+ * Runs {@link ExtendedFacetsTests} with eight shards and 10,000 documents.
+ */
+public class ExtendedFacetsTestsMultiShardMultiNodeTests extends ExtendedFacetsTests {
+
+ @Override
+ protected int numberOfShards() {
+ return 8;
+ }
+
+ @Override
+ protected int numDocs() {
+ return 10000;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/facet/SimpleFacetsTests.java b/src/test/java/org/elasticsearch/search/facet/SimpleFacetsTests.java
new file mode 100644
index 0000000..6130db0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/SimpleFacetsTests.java
@@ -0,0 +1,2378 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.facet;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.facet.datehistogram.DateHistogramFacet;
+import org.elasticsearch.search.facet.filter.FilterFacet;
+import org.elasticsearch.search.facet.histogram.HistogramFacet;
+import org.elasticsearch.search.facet.query.QueryFacet;
+import org.elasticsearch.search.facet.range.RangeFacet;
+import org.elasticsearch.search.facet.statistical.StatisticalFacet;
+import org.elasticsearch.search.facet.terms.TermsFacet;
+import org.elasticsearch.search.facet.terms.TermsFacet.Entry;
+import org.elasticsearch.search.facet.terms.doubles.InternalDoubleTermsFacet;
+import org.elasticsearch.search.facet.terms.longs.InternalLongTermsFacet;
+import org.elasticsearch.search.facet.termsstats.TermsStatsFacet;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.DateTimeFormatter;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.termFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.search.facet.FacetBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for the facet implementations (terms, filter, query,
+ * histogram, date_histogram, range, statistical and terms_stats).
+ */
+public class SimpleFacetsTests extends ElasticsearchIntegrationTest {
+
+ private int numRuns = -1;
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", between(1, 5))
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
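+ // the random repetition count is picked once and reused across a whole test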
+ protected int numberOfRuns() {
+ if (numRuns == -1) {
+ numRuns = atLeast(3);
+ }
+ return numRuns;
+ }
+
+ @Test
+ public void testSimpleFacetEmptyFacetFilter() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("tag", "green")
+ .endObject()).execute().actionGet();
+ refresh();
+
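+ // an empty facet filter body must still parse, and the facet should count the one document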
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setFacets(new BytesArray(
+ "{\"facet1\":{\"filter\":{ }}}").array())
+ .get();
+
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(0));
+ FilterFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getCount(), equalTo(1l));
+ }
+
+ @Test
+ public void testSimpleFacetEmptyFilterFacet() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("tag", "green")
+ .endObject()).execute().actionGet();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setFacets(new BytesArray(
+ "{\"facet1\":{\"terms\":{\"field\":\"tag\"},\"facet_filter\":{ }}}").array())
+ .get();
+
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(0));
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("green"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ }
+
+ @Test
+ public void testBinaryFacet() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("tag", "green")
+ .endObject()).execute().actionGet();
+
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("tag", "blue")
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setFacets(XContentFactory.jsonBuilder().startObject()
+ .startObject("facet1")
+ .startObject("terms")
+ .field("field", "tag")
+ .endObject()
+ .endObject()
+ .endObject().bytes())
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(0));
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("green"), equalTo("blue")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("green"), equalTo("blue")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+ }
+ }
+
+ @Test
+ public void testFacetNumeric() throws ElasticsearchException, IOException {
+ prepareCreate("test").addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("byte").field("type", "byte").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("short").field("type", "short").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("integer").field("type", "integer").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("long").field("type", "long").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("float").field("type", "float").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("double").field("type", "double").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
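+ // 100 documents carry all the numeric fields; the ten extra ones omit them and only show up as missing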
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("name", "" + i)
+ .field("multiValued", "" + i, "" + (90 + i % 10))
+ .field("byte", i)
+ .field("short", i + Byte.MAX_VALUE)
+ .field("integer", i + Short.MAX_VALUE)
+ .field("long", i + Integer.MAX_VALUE)
+ .field("float", (float) i)
+ .field("double", (double) i)
+ .endObject()).execute().actionGet();
+ }
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type", "" + (i + 100)).setSource(jsonBuilder().startObject()
+ .field("foo", "" + i)
+ .endObject()).execute().actionGet();
+ }
+
+ String[] execHint = new String[]{"map", null};
+ for (String hint : execHint) {
+
+ flushAndRefresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("double").executionHint(hint).field("double").size(10))
+ .addFacet(termsFacet("float").executionHint(hint).field("float").size(10))
+ .addFacet(termsFacet("integer").executionHint(hint).field("integer").size(10))
+ .addFacet(termsFacet("long").executionHint(hint).field("long").size(10))
+ .addFacet(termsFacet("short").executionHint(hint).field("short").size(10))
+ .addFacet(termsFacet("byte").executionHint(hint).field("byte").size(10))
+ .addFacet(termsFacet("termFacet").executionHint(hint).field("name").size(10))
+ .addFacet(termsFacet("termFacetRegex").executionHint(hint).field("multiValued").regex("9\\d").size(20))
+ .addFacet(termsFacet("termFacetScript").executionHint(hint).field("multiValued").script("Integer.toHexString(Integer.parseInt(term))").size(10))
+ .addFacet(termsFacet("termFacetScriptRegex").executionHint(hint).field("multiValued").script("Integer.toHexString(Integer.parseInt(term))").regex("9\\d").size(20))
+
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(110l));
+ TermsFacet facet = searchResponse.getFacets().facet("termFacet");
+ assertThat(facet.getName(), equalTo("termFacet"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ facet = searchResponse.getFacets().facet("termFacetRegex");
+ assertThat(facet.getName(), equalTo("termFacetRegex"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(190l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ int count = 99;
+ for (Entry entry : facet) {
+ assertThat(Integer.parseInt(entry.getTerm().string()), equalTo(count--));
+ assertThat(entry.getCount(), equalTo(10));
+ }
+
+ facet = searchResponse.getFacets().facet("termFacetScriptRegex");
+ assertThat(facet.getName(), equalTo("termFacetScriptRegex"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(190l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ count = 99;
+ for (Entry entry : facet) {
+ assertThat(entry.getTerm().string(), equalTo(Integer.toHexString(count--)));
+ assertThat(entry.getCount(), equalTo(10));
+ }
+
+ facet = searchResponse.getFacets().facet("termFacetScript");
+ assertThat(facet.getName(), equalTo("termFacetScript"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(190l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ count = 99;
+ for (Entry entry : facet) {
+ assertThat(entry.getTerm().string(), equalTo(Integer.toHexString(count--)));
+ assertThat(entry.getCount(), equalTo(10));
+ }
+
+ facet = searchResponse.getFacets().facet("double");
+ assertThat(facet.getName(), equalTo("double"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ facet = searchResponse.getFacets().facet("float");
+ assertThat(facet.getName(), equalTo("float"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ facet = searchResponse.getFacets().facet("long");
+ assertThat(facet.getName(), equalTo("long"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ facet = searchResponse.getFacets().facet("integer");
+ assertThat(facet.getName(), equalTo("integer"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ facet = searchResponse.getFacets().facet("short");
+ assertThat(facet.getName(), equalTo("short"));
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+ }
+
+ }
+
+
+ @Test
+ @Slow
+ public void testConcurrentFacets() throws ElasticsearchException, IOException, InterruptedException, ExecutionException {
+ prepareCreate("test")
+ .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("byte").field("type", "byte").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("short").field("type", "short").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("integer").field("type", "integer").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("long").field("type", "long").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("float").field("type", "float").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("double").field("type", "double").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("name", "" + i)
+ .field("byte", i)
+ .field("short", i + Byte.MAX_VALUE)
+ .field("integer", i + Short.MAX_VALUE)
+ .field("long", i + Integer.MAX_VALUE)
+ .field("float", (float) i)
+ .field("double", (double) i)
+ .endObject()).execute().actionGet();
+ }
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type", "" + (i + 100)).setSource(jsonBuilder().startObject()
+ .field("foo", "" + i)
+ .endObject()).execute().actionGet();
+ }
+
+ flushAndRefresh();
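+ // five threads fire equivalent facet requests concurrently; each response
+ // must match the single-threaded baseline entry for entry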
+ ConcurrentDuel<Facets> duel = new ConcurrentDuel<Facets>(5);
+ {
+ final Client cl = client();
+
+ duel.duel(new ConcurrentDuel.DuelJudge<Facets>() {
+
+ @Override
+ public void judge(Facets firstRun, Facets result) {
+ for (Facet f : result) {
+ TermsFacet facet = (TermsFacet) f;
+ assertThat(facet.getName(), isIn(new String[]{"short", "double", "byte", "float", "integer", "long", "termFacet"}));
+ TermsFacet firstRunFacet = (TermsFacet) firstRun.getFacets().get(facet.getName());
+ assertThat(facet.getEntries().size(), equalTo(firstRunFacet.getEntries().size()));
+
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ List<? extends Entry> right = facet.getEntries();
+ List<? extends Entry> left = firstRunFacet.getEntries();
+
+ for (int i = 0; i < facet.getEntries().size(); i++) {
+ assertThat(left.get(i).getTerm(), equalTo(right.get(i).getTerm()));
+ assertThat(left.get(i).getCount(), equalTo(right.get(i).getCount()));
+ }
+ }
+ }
+ }, new ConcurrentDuel.DuelExecutor<Facets>() {
+ AtomicInteger count = new AtomicInteger();
+
+ @Override
+ public Facets run() {
+ final SearchRequestBuilder facetRequest;
+ if (count.incrementAndGet() % 2 == 0) { // every second request is mapped
+ facetRequest = cl.prepareSearch().setQuery(matchAllQuery())
+ .addFacet(termsFacet("double").field("double").size(10))
+ .addFacet(termsFacet("float").field("float").size(10))
+ .addFacet(termsFacet("integer").field("integer").size(10))
+ .addFacet(termsFacet("long").field("long").size(10))
+ .addFacet(termsFacet("short").field("short").size(10))
+ .addFacet(termsFacet("byte").field("byte").size(10))
+ .addFacet(termsFacet("termFacet").field("name").size(10));
+ } else {
+ facetRequest = cl.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("double").executionHint("map").field("double").size(10))
+ .addFacet(termsFacet("float").executionHint("map").field("float").size(10))
+ .addFacet(termsFacet("integer").executionHint("map").field("integer").size(10))
+ .addFacet(termsFacet("long").executionHint("map").field("long").size(10))
+ .addFacet(termsFacet("short").executionHint("map").field("short").size(10))
+ .addFacet(termsFacet("byte").executionHint("map").field("byte").size(10))
+ .addFacet(termsFacet("termFacet").executionHint("map").field("name").size(10));
+ }
+
+ SearchResponse actionGet = facetRequest.execute().actionGet();
+ return actionGet.getFacets();
+ }
+ }, 5000
+ );
+ }
+ {
+ duel.duel(new ConcurrentDuel.DuelJudge<Facets>() {
+
+ @Override
+ public void judge(Facets firstRun, Facets result) {
+ for (Facet f : result) {
+ TermsFacet facet = (TermsFacet) f;
+ assertThat(facet.getName(), equalTo("termFacet"));
+ TermsFacet firstRunFacet = (TermsFacet) firstRun.getFacets().get(facet.getName());
+ assertThat(facet.getEntries().size(), equalTo(firstRunFacet.getEntries().size()));
+
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+
+ List<? extends Entry> right = facet.getEntries();
+ List<? extends Entry> left = firstRunFacet.getEntries();
+
+ for (int i = 0; i < facet.getEntries().size(); i++) {
+ assertThat(left.get(i).getTerm(), equalTo(right.get(i).getTerm()));
+ assertThat(left.get(i).getCount(), equalTo(right.get(i).getCount()));
+ }
+ }
+ }
+ }, new ConcurrentDuel.DuelExecutor<Facets>() {
+ AtomicInteger count = new AtomicInteger();
+
+ @Override
+ public Facets run() {
+ final SearchRequestBuilder facetRequest;
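+ // rotate through plain, regex, script and map-hinted variants so different execution paths race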
+ switch (count.incrementAndGet() % 6) {
+ case 4:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").executionHint("map").field("name").script("\"\" + (Integer.parseInt(term) % 100)").size(10));
+ break;
+ case 3:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").field("name").regex("\\d+").size(10));
+ break;
+ case 2:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").executionHint("map").field("name").regex("\\d+").script("term").size(10));
+ break;
+ case 1:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").field("name").regex("\\d+").script("term").size(10));
+ break;
+ case 0:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").field("name").size(10));
+ break;
+ default:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").executionHint("map").field("name").size(10));
+ break;
+ }
+ SearchResponse actionGet = facetRequest.execute().actionGet();
+ return actionGet.getFacets();
+ }
+ }, 5000
+ );
+ }
+
+ duel.close();
+ }
+
+ @Test
+ @Slow
+ public void testDuelByteFieldDataImpl() throws ElasticsearchException, IOException, InterruptedException, ExecutionException {
+ prepareCreate("test")
+ .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("name_paged")
+ .field("type", "string")
+ .startObject("fielddata").field("format", "paged_bytes").field("loading", randomBoolean() ? "eager" : "lazy").endObject()
+ .endObject()
+ .startObject("name_fst")
+ .field("type", "string")
+ .startObject("fielddata").field("format", "fst").field("loading", randomBoolean() ? "eager" : "lazy").endObject()
+ .endObject()
+ .startObject("name_dv")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata").field("format", "doc_values").field("loading", randomBoolean() ? "eager" : "lazy").endObject()
+ .endObject()
+ .startObject("name_paged_mv")
+ .field("type", "string")
+ .startObject("fielddata").field("format", "paged_bytes").field("loading", randomBoolean() ? "eager" : "lazy").endObject()
+ .endObject()
+ .startObject("name_fst_mv")
+ .field("type", "string")
+ .startObject("fielddata").field("format", "fst").field("loading", randomBoolean() ? "eager" : "lazy").endObject()
+ .endObject()
+ .startObject("name_dv_mv")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata").field("format", "doc_values").field("loading", randomBoolean() ? "eager" : "lazy").endObject()
+ .endObject()
+ .startObject("filtered")
+ .field("type", "string")
+ .startObject("fielddata").field("format", "fst").field("loading", randomBoolean() ? "eager" : "lazy").startObject("filter")
+ .startObject("regex").field("pattern", "\\d{1,2}").endObject().endObject() // only 1 or 2 digits
+ .endObject()
+ .endObject()
+ .startObject("filtered_mv")
+ .field("type", "string")
+ .startObject("fielddata").field("format", "fst").field("loading", randomBoolean() ? "eager" : "lazy").startObject("filter")
+ .startObject("regex").field("pattern", "\\d{1,2}").endObject().endObject()
+ .endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("name_paged", "" + i)
+ .field("name_fst", "" + i)
+ .field("filtered", "" + i)
+ .field("name_paged_mv", "" + i, "" + Math.min(99, i + 1))
+ .field("name_fst_mv", "" + i, "" + Math.min(99, i + 1))
+ .field("filtered_mv", "" + i, "" + Math.min(99, i + 1), "" + (100 + i))
+ .endObject()).execute().actionGet();
+ }
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type", "" + (i + 100)).setSource(jsonBuilder().startObject()
+ .field("foo", "" + i)
+ .endObject()).execute().actionGet();
+ }
+
+ flushAndRefresh();
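+ // the *_mv fields carry two values per document (i and i + 1, capped at 99),
+ // which is why the multi-valued judge below expects a total count of 199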
+ ConcurrentDuel<Facets> duel = new ConcurrentDuel<Facets>(5);
+ String[] fieldPostFix = new String[]{"", "_mv"};
+ for (final String postfix : fieldPostFix) {
+ duel.duel(new ConcurrentDuel.DuelJudge<Facets>() {
+
+ @Override
+ public void judge(Facets firstRun, Facets result) {
+ for (Facet f : result) {
+ TermsFacet facet = (TermsFacet) f;
+ assertThat(facet.getName(), equalTo("termFacet"));
+ TermsFacet firstRunFacet = (TermsFacet) firstRun.getFacets().get(facet.getName());
+ assertThat(facet.getEntries().size(), equalTo(firstRunFacet.getEntries().size()));
+
+ if ("_mv".equals(postfix)) {
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(199l));
+ assertThat(facet.getOtherCount(), equalTo(179l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+ } else {
+ assertThat(facet.getEntries().size(), equalTo(10));
+ assertThat(facet.getTotalCount(), equalTo(100l));
+ assertThat(facet.getOtherCount(), equalTo(90l));
+ assertThat(facet.getMissingCount(), equalTo(10l));
+ }
+ List<? extends Entry> right = facet.getEntries();
+ List<? extends Entry> left = firstRunFacet.getEntries();
+
+ for (int i = 0; i < facet.getEntries().size(); i++) {
+ assertThat(left.get(i).getTerm(), equalTo(right.get(i).getTerm()));
+ assertThat(left.get(i).getCount(), equalTo(right.get(i).getCount()));
+ }
+ }
+ }
+ }, new ConcurrentDuel.DuelExecutor<Facets>() {
+ AtomicInteger count = new AtomicInteger();
+
+ @Override
+ public Facets run() {
+ final SearchRequestBuilder facetRequest;
+ int incrementAndGet = count.incrementAndGet();
+ final String field;
+ switch (incrementAndGet % 3) { // cycle through filtered, name_paged and name_fst
+ case 1:
+ field = "filtered" + postfix;
+ break;
+ case 0:
+ field = "name_paged" + postfix;
+ break;
+ default:
+ field = "name_fst" + postfix;
+ }
+ switch (incrementAndGet % 5) {
+ case 4:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").executionHint("map").field(field).script("\"\" + (Integer.parseInt(term) % 100)").size(10));
+ break;
+ case 3:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").field(field).regex("\\d+").size(10));
+ break;
+ case 2:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").executionHint("map").field(field).regex("\\d+").script("term").size(10));
+ break;
+ case 1:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").field(field).regex("\\d+").script("term").size(10));
+ break;
+ case 0:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").field(field).size(10));
+ break;
+ default:
+ facetRequest = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("termFacet").executionHint("map").field(field).size(10));
+ break;
+ }
+ SearchResponse actionGet = facetRequest.execute().actionGet();
+ return actionGet.getFacets();
+ }
+ }, 5000
+ );
+ }
+
+ duel.close();
+ }
+
+ @Test
+ public void testSearchFilter() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("tag", "green")
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("tag", "blue")
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("green"), equalTo("blue")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("green"), equalTo("blue")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(termFilter("tag", "blue"))
+ .addFacet(termsFacet("facet1").field("tag").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("green"), equalTo("blue")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("green"), equalTo("blue")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+ }
+ }
+
+ @Test
+ public void testFacetsWithSize0() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .field("lstag", 111)
+ .startArray("tag").value("xxx").value("yyy").endArray()
+ .startArray("ltag").value(1000l).value(2000l).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .field("lstag", 111)
+ .startArray("tag").value("zzz").value("yyy").endArray()
+ .startArray("ltag").value(3000l).value(2000l).endArray()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
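+ // size 0 suppresses the hits but facets are still computed over every match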
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSize(0)
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("stag").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(0));
+
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setSize(0)
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("stag").size(10))
+ .addFacet(termsFacet("facet2").field("tag").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(0));
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ }
+ }
+
+ @Test
+ public void testTermsIndexFacet() throws Exception {
+ createIndex("test1");
+ createIndex("test2");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test1", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test1", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test2", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+
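+ // faceting on the built-in _index field buckets the matching docs per index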
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSize(0)
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("_index").size(10))
+ .execute().actionGet();
+
+
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("test1"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("test2"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+ }
+
+ try {
+ client().admin().indices().prepareDelete("test1").execute().actionGet();
+ client().admin().indices().prepareDelete("test2").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+
+ @Test
+ public void testFilterFacets() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .startArray("tag").value("xxx").value("yyy").endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .startArray("tag").value("zzz").value("yyy").endArray()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(filterFacet("facet1").filter(termFilter("stag", "111")))
+ .addFacet(filterFacet("facet2").filter(termFilter("tag", "xxx")))
+ .addFacet(filterFacet("facet3").filter(termFilter("tag", "yyy")))
+ .addFacet(filterFacet("facet4").filter(termFilter("tag", "zzz")))
+ .execute().actionGet();
+
+ FilterFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getCount(), equalTo(2l));
+
+ facet = searchResponse.getFacets().facet("facet2");
+ assertThat(facet.getName(), equalTo("facet2"));
+ assertThat(facet.getCount(), equalTo(1l));
+
+ facet = searchResponse.getFacets().facet("facet3");
+ assertThat(facet.getName(), equalTo("facet3"));
+ assertThat(facet.getCount(), equalTo(2l));
+
+ facet = searchResponse.getFacets().facet("facet4");
+ assertThat(facet.getName(), equalTo("facet4"));
+ assertThat(facet.getCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void testTermsFacetsMissing() throws Exception {
+ prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("bstag").field("type", "byte").endObject()
+ .startObject("shstag").field("type", "short").endObject()
+ .startObject("istag").field("type", "integer").endObject()
+ .startObject("lstag").field("type", "long").endObject()
+ .startObject("fstag").field("type", "float").endObject()
+ .startObject("dstag").field("type", "double").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .field("bstag", 111)
+ .field("shstag", 111)
+ .field("istag", 111)
+ .field("lstag", 111)
+ .field("fstag", 111.1f)
+ .field("dstag", 111.1)
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("kuku", "kuku")
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("stag").size(10))
+ .execute().actionGet();
+
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getMissingCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void testTermsFacetsNoHint() throws Exception {
+ testTermsFacets(null);
+ }
+
+ @Test
+ public void testTermsFacetsMapHint() throws Exception {
+ testTermsFacets("map");
+ }
+
+ private void testTermsFacets(String executionHint) throws Exception {
+ prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("bstag").field("type", "byte").endObject()
+ .startObject("shstag").field("type", "short").endObject()
+ .startObject("istag").field("type", "integer").endObject()
+ .startObject("lstag").field("type", "long").endObject()
+ .startObject("fstag").field("type", "float").endObject()
+ .startObject("dstag").field("type", "double").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .field("bstag", 111)
+ .field("shstag", 111)
+ .field("istag", 111)
+ .field("lstag", 111)
+ .field("fstag", 111.1f)
+ .field("dstag", 111.1)
+ .startArray("tag").value("xxx").value("yyy").endArray()
+ .startArray("ltag").value(1000l).value(2000l).endArray()
+ .startArray("dtag").value(1000.1).value(2000.1).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("stag", "111")
+ .field("bstag", 111)
+ .field("shstag", 111)
+ .field("istag", 111)
+ .field("lstag", 111)
+ .field("fstag", 111.1f)
+ .field("dstag", 111.1)
+ .startArray("tag").value("zzz").value("yyy").endArray()
+ .startArray("ltag").value(3000l).value(2000l).endArray()
+ .startArray("dtag").value(3000.1).value(2000.1).endArray()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("stag").size(10).executionHint(executionHint))
+ .addFacet(termsFacet("facet2").field("tag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getTotalCount(), equalTo(2l));
+ assertThat(facet.getOtherCount(), equalTo(0l));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ facet = searchResponse.getFacets().facet("facet2");
+ assertThat(facet.getName(), equalTo("facet2"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ // Numeric
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("lstag").size(10).executionHint(executionHint))
+ .addFacet(termsFacet("facet2").field("ltag").size(10).executionHint(executionHint))
+ .addFacet(termsFacet("facet3").field("ltag").size(10).exclude(3000).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet, instanceOf(InternalLongTermsFacet.class));
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ facet = searchResponse.getFacets().facet("facet2");
+ assertThat(facet, instanceOf(InternalLongTermsFacet.class));
+ assertThat(facet.getName(), equalTo("facet2"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("2000"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("1000"), equalTo("3000")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(2).getTerm().string(), anyOf(equalTo("1000"), equalTo("3000")));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+
+ facet = searchResponse.getFacets().facet("facet3");
+ assertThat(facet, instanceOf(InternalLongTermsFacet.class));
+ assertThat(facet.getName(), equalTo("facet3"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("2000"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("1000"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("dstag").size(10).executionHint(executionHint))
+ .addFacet(termsFacet("facet2").field("dtag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet, instanceOf(InternalDoubleTermsFacet.class));
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111.1"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ facet = searchResponse.getFacets().facet("facet2");
+ assertThat(facet, instanceOf(InternalDoubleTermsFacet.class));
+ assertThat(facet.getName(), equalTo("facet2"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("2000.1"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("1000.1"), equalTo("3000.1")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(2).getTerm().string(), anyOf(equalTo("1000.1"), equalTo("3000.1")));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("bstag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("istag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("stag", "111"))
+ .addFacet(termsFacet("facet1").field("shstag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ // Test Facet Filter
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("stag").size(10).facetFilter(termFilter("tag", "xxx")).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+
+ // now with global
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("stag").size(10).facetFilter(termFilter("tag", "xxx")).global(true).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+
+ // Test Facet Filter (with a type)
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("type1.stag").size(10).facetFilter(termFilter("tag", "xxx")).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("xxx"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(2).getTerm().string(), anyOf(equalTo("xxx"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+
+ // Bounded Size
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(2).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("xxx"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+
+ // Test Exclude
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10).exclude("yyy").executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("xxx"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("xxx"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+
+ // Test Order
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10).order(TermsFacet.ComparatorType.TERM).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(2).getTerm().string(), equalTo("zzz"));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10).order(TermsFacet.ComparatorType.REVERSE_TERM).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(2).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("zzz"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+
+ // Script
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10).script("term + param1").param("param1", "a").order(TermsFacet.ComparatorType.TERM).executionHint(executionHint))
+ .execute().actionGet();
+
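+ // the script appends param1 ("a") to each term, hence xxxa/yyya/zzza below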
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxxa"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyya"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(2).getTerm().string(), equalTo("zzza"));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("tag").size(10).script("term == 'xxx' ? false : true").order(TermsFacet.ComparatorType.TERM).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("zzz"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1));
+
+ // Fields Facets
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").fields("stag", "tag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(4));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("111"), equalTo("yyy")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("111"), equalTo("yyy")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2));
+ assertThat(facet.getEntries().get(2).getTerm().string(), anyOf(equalTo("zzz"), equalTo("xxx")));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1));
+ assertThat(facet.getEntries().get(3).getTerm().string(), anyOf(equalTo("zzz"), equalTo("xxx")));
+ assertThat(facet.getEntries().get(3).getCount(), equalTo(1));
+
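+ // allTerms(true) returns every term in the field even though the query matches nothing, each with count 0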
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("xxx", "yyy")) // don't match anything
+ .addFacet(termsFacet("facet1").field("tag").size(10).allTerms(true).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(2).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz")));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(0));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("xxx", "yyy")) // don't match anything
+ .addFacet(termsFacet("facet1").fields("tag", "stag").size(10).allTerms(true).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(4));
+ assertThat(facet.getEntries().get(0).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz"), equalTo("111")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(1).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz"), equalTo("111")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(2).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz"), equalTo("111")));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(3).getTerm().string(), anyOf(equalTo("xxx"), equalTo("yyy"), equalTo("zzz"), equalTo("111")));
+ assertThat(facet.getEntries().get(3).getCount(), equalTo(0));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("xxx", "yyy")) // don't match anything
+ .addFacet(termsFacet("facet1").field("ltag").size(10).allTerms(true).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTermAsNumber().intValue(), anyOf(equalTo(1000), equalTo(2000), equalTo(3000)));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(1).getTermAsNumber().intValue(), anyOf(equalTo(1000), equalTo(2000), equalTo(3000)));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(2).getTermAsNumber().intValue(), anyOf(equalTo(1000), equalTo(2000), equalTo(3000)));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(0));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("xxx", "yyy")) // don't match anything
+ .addFacet(termsFacet("facet1").field("dtag").size(10).allTerms(true).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTermAsNumber().doubleValue(), anyOf(equalTo(1000.1), equalTo(2000.1), equalTo(3000.1)));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(1).getTermAsNumber().doubleValue(), anyOf(equalTo(1000.1), equalTo(2000.1), equalTo(3000.1)));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(0));
+ assertThat(facet.getEntries().get(2).getTermAsNumber().doubleValue(), anyOf(equalTo(1000.1), equalTo(2000.1), equalTo(3000.1)));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(0));
+
+ // Script Field
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").scriptField("_source.stag").size(10).executionHint(executionHint))
+ .addFacet(termsFacet("facet2").scriptField("_source.tag").size(10).executionHint(executionHint))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getTotalCount(), equalTo(2l));
+ assertThat(facet.getOtherCount(), equalTo(0l));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("111"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+
+ facet = searchResponse.getFacets().facet("facet2");
+ assertThat(facet.getTotalCount(), equalTo(4l));
+ assertThat(facet.getOtherCount(), equalTo(0l));
+ assertThat(facet.getName(), equalTo("facet2"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2));
+ }
+ }
+
+ @Test
+ public void testTermFacetWithEqualTermDistribution() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ // once everything is indexed, each of `foo`, `bar`, and `baz` should occur 10 times
+ for (int i = 0; i < 5; i++) {
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("text", "foo bar")
+ .endObject()).execute().actionGet();
+ }
+ for (int i = 0; i < 5; i++) {
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("text", "bar baz")
+ .endObject()).execute().actionGet();
+ }
+
+ for (int i = 0; i < 5; i++) {
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("text", "baz foo")
+ .endObject()).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("facet1").field("text").size(10))
+ .execute().actionGet();
+
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
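+ // all three terms tie at a count of 10, so their order is undefined and any permutation is accepted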
+ for (int j = 0; j < 3; j++) {
+ assertThat(facet.getEntries().get(j).getTerm().string(), anyOf(equalTo("foo"), equalTo("bar"), equalTo("baz")));
+ assertThat(facet.getEntries().get(j).getCount(), equalTo(10));
+ }
+ }
+ }
+
+ @Test
+ public void testStatsFacets() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ // We have to specify the mapping explicitly because, by the time the search is performed, the dynamic
+ // mapping might not have been propagated to all nodes yet, and some facets fail when the facet field is not defined
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "integer").endObject()
+ .startObject("multi_num").field("type", "float").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1)
+ .startArray("multi_num").value(1.0).value(2.0f).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 2)
+ .startArray("multi_num").value(3.0).value(4.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(statisticalFacet("stats1").field("num"))
+ .addFacet(statisticalFacet("stats2").field("multi_num"))
+ .addFacet(statisticalScriptFacet("stats3").script("doc['num'].value * 2"))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
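+ // num values are 1 and 2: count 2, total 3, min 1, max 2, mean 1.5, sum of squares 1 + 4 = 5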
+ StatisticalFacet facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getName(), equalTo("stats1"));
+ assertThat(facet.getCount(), equalTo(2l));
+ assertThat(facet.getTotal(), equalTo(3d));
+ assertThat(facet.getMin(), equalTo(1d));
+ assertThat(facet.getMax(), equalTo(2d));
+ assertThat(facet.getMean(), equalTo(1.5d));
+ assertThat(facet.getSumOfSquares(), equalTo(5d));
+
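+ // multi_num values are 1, 2, 3 and 4: count 4, total 10, mean 2.5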
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getName(), equalTo("stats2"));
+ assertThat(facet.getCount(), equalTo(4l));
+ assertThat(facet.getTotal(), equalTo(10d));
+ assertThat(facet.getMin(), equalTo(1d));
+ assertThat(facet.getMax(), equalTo(4d));
+ assertThat(facet.getMean(), equalTo(2.5d));
+
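+ // the script doubles num, yielding 2 and 4: total 6, mean 3, sum of squares 4 + 16 = 20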
+ facet = searchResponse.getFacets().facet("stats3");
+ assertThat(facet.getName(), equalTo("stats3"));
+ assertThat(facet.getCount(), equalTo(2l));
+ assertThat(facet.getTotal(), equalTo(6d));
+ assertThat(facet.getMin(), equalTo(2d));
+ assertThat(facet.getMax(), equalTo(4d));
+ assertThat(facet.getMean(), equalTo(3d));
+ assertThat(facet.getSumOfSquares(), equalTo(20d));
+
+ // test multi field facet
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(statisticalFacet("stats").fields("num", "multi_num"))
+ .execute().actionGet();
+
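+ // num (1, 2) and multi_num (1, 2, 3, 4) combined: count 6, total 13, sum of squares 5 + 30 = 35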
+ facet = searchResponse.getFacets().facet("stats");
+ assertThat(facet.getName(), equalTo("stats"));
+ assertThat(facet.getCount(), equalTo(6l));
+ assertThat(facet.getTotal(), equalTo(13d));
+ assertThat(facet.getMin(), equalTo(1d));
+ assertThat(facet.getMax(), equalTo(4d));
+ assertThat(facet.getMean(), equalTo(13d / 6d));
+ assertThat(facet.getSumOfSquares(), equalTo(35d));
+
+ // test cross field facet using the same facet name...
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(statisticalFacet("stats").field("num"))
+ .addFacet(statisticalFacet("stats").field("multi_num"))
+ .execute().actionGet();
+
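+ // both facets share the name "stats" and are merged in the response, so the numbers match the multi field facet above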
+ facet = searchResponse.getFacets().facet("stats");
+ assertThat(facet.getName(), equalTo("stats"));
+ assertThat(facet.getCount(), equalTo(6l));
+ assertThat(facet.getTotal(), equalTo(13d));
+ assertThat(facet.getMin(), equalTo(1d));
+ assertThat(facet.getMax(), equalTo(4d));
+ assertThat(facet.getMean(), equalTo(13d / 6d));
+ assertThat(facet.getSumOfSquares(), equalTo(35d));
+ }
+ }
+
+ @Test
+ public void testHistoFacetEdge() throws Exception {
+ // TODO: Make sure facet doesn't fail in case of dynamic mapping
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "integer").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 100)
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 200)
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 300)
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet("facet1").field("num").valueField("num").interval(100))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
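+ // 100, 200 and 300 each sit exactly on a bucket boundary of interval 100, so every value gets its own bucket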
+ HistogramFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(100l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(200l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getKey(), equalTo(300l));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void testHistoFacets() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "integer").endObject()
+ .startObject("multi_num").field("type", "float").endObject()
+ .startObject("date").field("type", "date").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1055)
+ .field("date", "1970-01-01T00:00:00")
+ .startArray("multi_num").value(13.0f).value(23.f).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1065)
+ .field("date", "1970-01-01T00:00:25")
+ .startArray("multi_num").value(15.0f).value(31.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1175)
+ .field("date", "1970-01-01T00:02:00")
+ .startArray("multi_num").value(17.0f).value(25.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(histogramFacet("stats1").field("num").valueField("num").interval(100))
+ .addFacet(histogramFacet("stats2").field("multi_num").valueField("multi_num").interval(10))
+ .addFacet(histogramFacet("stats3").keyField("num").valueField("multi_num").interval(100))
+ .addFacet(histogramScriptFacet("stats4").keyScript("doc['date'].date.minuteOfHour").valueScript("doc['num'].value"))
+ .addFacet(histogramFacet("stats5").field("date").interval(1, TimeUnit.MINUTES))
+ .addFacet(histogramScriptFacet("stats6").keyField("num").valueScript("doc['num'].value").interval(100))
+ .addFacet(histogramFacet("stats7").field("num").interval(100))
+ .addFacet(histogramScriptFacet("stats8").keyField("num").valueScript("doc.score").interval(100))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ HistogramFacet facet;
+
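+ // 1055 and 1065 fall into bucket 1000 (total 2120, mean 1060); 1175 falls into bucket 1100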
+ facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getName(), equalTo("stats1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(1000l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(1055d, 0.000001));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(1065d, 0.000001));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(2120d));
+ assertThat(facet.getEntries().get(0).getMean(), equalTo(1060d));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(1100l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(1175d, 0.000001));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(1175d, 0.000001));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(1175d));
+ assertThat(facet.getEntries().get(1).getMean(), equalTo(1175d));
+
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getName(), equalTo("stats2"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(10l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(45d));
+ assertThat(facet.getEntries().get(0).getMean(), equalTo(15d));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(20l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(48d));
+ assertThat(facet.getEntries().get(1).getMean(), equalTo(24d));
+ assertThat(facet.getEntries().get(2).getKey(), equalTo(30l));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotal(), equalTo(31d));
+ assertThat(facet.getEntries().get(2).getMean(), equalTo(31d));
+
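+ // keys come from num, values from multi_num: bucket 1000 aggregates 13, 23, 15 and 31 (total 82), bucket 1100 aggregates 17 and 25 (total 42)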
+ facet = searchResponse.getFacets().facet("stats3");
+ assertThat(facet.getName(), equalTo("stats3"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(1000l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(4l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(82d));
+ assertThat(facet.getEntries().get(0).getMean(), equalTo(20.5d));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(1100l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(42d));
+ assertThat(facet.getEntries().get(1).getMean(), equalTo(21d));
+
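+ // keyScript buckets by minute of hour: 00:00:00 and 00:00:25 map to key 0, 00:02:00 maps to key 2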
+ facet = searchResponse.getFacets().facet("stats4");
+ assertThat(facet.getName(), equalTo("stats4"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(0l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(2120d));
+ assertThat(facet.getEntries().get(0).getMean(), equalTo(1060d));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(1175d));
+ assertThat(facet.getEntries().get(1).getMean(), equalTo(1175d));
+
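+ // a one minute interval on the date field: the two docs within the first minute share bucket 0, the 00:02:00 doc lands at the two minute mark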
+ facet = searchResponse.getFacets().facet("stats5");
+ assertThat(facet.getName(), equalTo("stats5"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(0l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(TimeValue.timeValueMinutes(2).millis()));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+
+ facet = searchResponse.getFacets().facet("stats6");
+ assertThat(facet.getName(), equalTo("stats6"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(1000l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(2120d));
+ assertThat(facet.getEntries().get(0).getMean(), equalTo(1060d));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(1100l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(1175d));
+ assertThat(facet.getEntries().get(1).getMean(), equalTo(1175d));
+
+ facet = searchResponse.getFacets().facet("stats7");
+ assertThat(facet.getName(), equalTo("stats7"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(1000l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(1100l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+
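+ // under match_all every doc scores 1.0, so each bucket's total equals its doc count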
+ facet = searchResponse.getFacets().facet("stats8");
+ assertThat(facet.getName(), equalTo("stats8"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getKey(), equalTo(1000l));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(2d));
+ assertThat(facet.getEntries().get(0).getMean(), equalTo(1d));
+ assertThat(facet.getEntries().get(1).getKey(), equalTo(1100l));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(1d));
+ assertThat(facet.getEntries().get(1).getMean(), equalTo(1d));
+ }
+ }
+
+ @Test
+ public void testRangeFacets() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "integer").endObject()
+ .startObject("multi_num").field("type", "float").endObject()
+ .startObject("value").field("type", "integer").endObject()
+ .startObject("multi_value").field("type", "float").endObject()
+ .startObject("date").field("type", "date").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1055)
+ .field("value", 1)
+ .field("date", "1970-01-01T00:00:00")
+ .startArray("multi_num").value(13.0f).value(23.f).endArray()
+ .startArray("multi_value").value(10).value(11).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1065)
+ .field("value", 2)
+ .field("date", "1970-01-01T00:00:25")
+ .startArray("multi_num").value(15.0f).value(31.0f).endArray()
+ .startArray("multi_value").value(20).value(21).endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("num", 1175)
+ .field("value", 3)
+ .field("date", "1970-01-01T00:00:52")
+ .startArray("multi_num").value(17.0f).value(25.0f).endArray()
+ .startArray("multi_value").value(30).value(31).endArray()
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(rangeFacet("range1").field("num").addUnboundedFrom(1056).addRange(1000, 1170).addUnboundedTo(1170))
+ .addFacet(rangeFacet("range2").keyField("num").valueField("value").addUnboundedFrom(1056).addRange(1000, 1170).addUnboundedTo(1170))
+ .addFacet(rangeFacet("range3").keyField("num").valueField("multi_value").addUnboundedFrom(1056).addRange(1000, 1170).addUnboundedTo(1170))
+ .addFacet(rangeFacet("range4").keyField("multi_num").valueField("value").addUnboundedFrom(16).addRange(10, 26).addUnboundedTo(20))
+ .addFacet(rangeScriptFacet("range5").keyScript("doc['num'].value").valueScript("doc['value'].value").addUnboundedFrom(1056).addRange(1000, 1170).addUnboundedTo(1170))
+ .addFacet(rangeFacet("range6").field("date").addUnboundedFrom("1970-01-01T00:00:26").addRange("1970-01-01T00:00:15", "1970-01-01T00:00:53").addUnboundedTo("1970-01-01T00:00:26"))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
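+ // the ranges overlap: 1055 is counted both in the unbounded "to 1056" entry and in [1000, 1170); 1175 falls into the unbounded "from 1170" entry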
+ RangeFacet facet = searchResponse.getFacets().facet("range1");
+ assertThat(facet.getName(), equalTo("range1"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(1056, 0.000001));
+ assertThat(Double.parseDouble(facet.getEntries().get(0).getToAsString()), closeTo(1056, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(1055, 0.000001));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(1055, 0.000001));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(1055, 0.000001));
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(1000, 0.000001));
+ assertThat(Double.parseDouble(facet.getEntries().get(1).getFromAsString()), closeTo(1000, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1170, 0.000001));
+ assertThat(Double.parseDouble(facet.getEntries().get(1).getToAsString()), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(1055 + 1065, 0.000001));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(1055, 0.000001));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(1065, 0.000001));
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(1175, 0.000001));
+ assertThat(facet.getEntries().get(2).getMin(), closeTo(1175, 0.000001));
+ assertThat(facet.getEntries().get(2).getMax(), closeTo(1175, 0.000001));
+
+ facet = searchResponse.getFacets().facet("range2");
+ assertThat(facet.getName(), equalTo("range2"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(1056, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(1000, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(3, 0.000001));
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(3, 0.000001));
+
+ facet = searchResponse.getFacets().facet("range3");
+ assertThat(facet.getName(), equalTo("range3"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(1056, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(10 + 11, 0.000001));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(10, 0.000001));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(11, 0.000001));
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(1000, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(4l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(62, 0.000001));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(10, 0.000001));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(21, 0.000001));
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(61, 0.000001));
+ assertThat(facet.getEntries().get(2).getMin(), closeTo(30, 0.000001));
+ assertThat(facet.getEntries().get(2).getMax(), closeTo(31, 0.000001));
+
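+ // multi-valued keys: a doc is counted once per range any of its multi_num values hits, and the totals sum the value field of the matching docs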
+ facet = searchResponse.getFacets().facet("range4");
+ assertThat(facet.getName(), equalTo("range4"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(16, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(3, 0.000001));
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(10, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(26, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(1 + 2 + 3, 0.000001));
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(20, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(1 + 2 + 3, 0.000001));
+
+ facet = searchResponse.getFacets().facet("range5");
+ assertThat(facet.getName(), equalTo("range5"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(1056, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(1000, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(3, 0.000001));
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(1170, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(3, 0.000001));
+
+ facet = searchResponse.getFacets().facet("range6");
+ assertThat(facet.getName(), equalTo("range6"));
+ assertThat(facet.getEntries().size(), equalTo(3));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getToAsString(), equalTo("1970-01-01T00:00:26"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getFromAsString(), equalTo("1970-01-01T00:00:15"));
+ assertThat(facet.getEntries().get(1).getToAsString(), equalTo("1970-01-01T00:00:53"));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(2).getFromAsString(), equalTo("1970-01-01T00:00:26"));
+ }
+ }
+
+ @Test
+ public void testDateHistoFacetsCollectorMode() throws Exception {
+ testDateHistoFacets(FacetBuilder.Mode.COLLECTOR);
+ }
+
+ @Test
+ public void testDateHistoFacetsPostMode() throws Exception {
+ testDateHistoFacets(FacetBuilder.Mode.POST);
+ }
+
+ private void testDateHistoFacets(FacetBuilder.Mode mode) throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "integer").endObject()
+ .startObject("date").field("type", "date").endObject()
+ .startObject("date_in_seconds").field("type", "long").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ DateTimeFormatter parser = Joda.forPattern("dateOptionalTime").parser();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("date", "2009-03-05T01:01:01")
+ .field("num", 1)
+ .field("date_in_seconds", TimeUnit.SECONDS.convert(parser.parseMillis("2009-03-05T01:01:01"), TimeUnit.MILLISECONDS))
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("date", "2009-03-05T04:01:01")
+ .field("num", 2)
+ .field("date_in_seconds", TimeUnit.SECONDS.convert(parser.parseMillis("2009-03-05T04:01:01"), TimeUnit.MILLISECONDS))
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("date", "2009-03-06T01:01:01")
+ .field("num", 3)
+ .field("date_in_seconds", TimeUnit.SECONDS.convert(parser.parseMillis("2009-03-06T01:01:01"), TimeUnit.MILLISECONDS))
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(dateHistogramFacet("stats1").field("date").interval("day").mode(mode))
+ .addFacet(dateHistogramFacet("stats2").field("date").interval("day").preZone("-02:00").mode(mode))
+ .addFacet(dateHistogramFacet("stats3").field("date").valueField("num").interval("day").preZone("-02:00").mode(mode))
+ .addFacet(dateHistogramFacet("stats4").field("date").valueScript("doc['num'].value * 2").interval("day").preZone("-02:00").mode(mode))
+ .addFacet(dateHistogramFacet("stats5").field("date").interval("24h").mode(mode))
+ .addFacet(dateHistogramFacet("stats6").field("date").valueField("num").interval("day").preZone("-02:00").postZone("-02:00").mode(mode))
+ .addFacet(dateHistogramFacet("stats7").field("date").interval("quarter").mode(mode))
+ .addFacet(dateHistogramFacet("stats8").field("date_in_seconds").interval("day").factor(1000f).mode(mode))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ DateHistogramFacet facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getName(), equalTo("stats1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-06")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+
+ // the -02:00 pre zone shifts the dates back two hours, moving the 01:01 doc to the previous day
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getName(), equalTo("stats2"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-04")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+
+ // the -02:00 pre zone shifts the dates back two hours, moving the 01:01 doc to the previous day
+ facet = searchResponse.getFacets().facet("stats3");
+ assertThat(facet.getName(), equalTo("stats3"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-04")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(1d));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(5d));
+
+ // the -02:00 pre zone shifts the dates back two hours, moving the 01:01 doc to the previous day
+ facet = searchResponse.getFacets().facet("stats4");
+ assertThat(facet.getName(), equalTo("stats4"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-04")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(2d));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(10d));
+
+ facet = searchResponse.getFacets().facet("stats5");
+ assertThat(facet.getName(), equalTo("stats5"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-06")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+
+ facet = searchResponse.getFacets().facet("stats6");
+ assertThat(facet.getName(), equalTo("stats6"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-04") - TimeValue.timeValueHours(2).millis()));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(1d));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-05") - TimeValue.timeValueHours(2).millis()));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(5d));
+
+ facet = searchResponse.getFacets().facet("stats7");
+ assertThat(facet.getName(), equalTo("stats7"));
+ assertThat(facet.getEntries().size(), equalTo(1));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-01-01")));
+
+ // check date_histogram on a long field holding dates in seconds; factor(1000) scales them back to milliseconds
+ facet = searchResponse.getFacets().facet("stats8");
+ assertThat(facet.getName(), equalTo("stats8"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-06")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/2141
+ public void testDateHistoFacets_preZoneBug() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "integer").endObject()
+ .startObject("date").field("type", "date").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("date", "2009-03-05T23:31:01")
+ .field("num", 1)
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("date", "2009-03-05T18:01:01")
+ .field("num", 2)
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("date", "2009-03-05T22:01:01")
+ .field("num", 3)
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(dateHistogramFacet("stats1").field("date").interval("day").preZone("+02:00"))
+ .addFacet(dateHistogramFacet("stats2").field("date").valueField("num").interval("day").preZone("+01:30"))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ // the +02:00 pre zone shifts the dates forward two hours, pushing the 22:01 and 23:31 docs into the next day
+ DateHistogramFacet facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getName(), equalTo("stats1"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-06")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+
+ // the +01:30 pre zone shifts the dates forward 90 minutes; only the 23:31 doc crosses into the next day
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getName(), equalTo("stats2"));
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTime(), equalTo(utcTimeInMillis("2009-03-05")));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), equalTo(5d));
+ assertThat(facet.getEntries().get(1).getTime(), equalTo(utcTimeInMillis("2009-03-06")));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), equalTo(1d));
+ }
+ }
+
+ @Test
+ public void testTermsStatsFacets() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field").field("type", "string").endObject()
+ .startObject("num").field("type", "integer").endObject()
+ .startObject("multi_num").field("type", "float").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("field", "xxx")
+ .field("num", 100.0)
+ .startArray("multi_num").value(1.0).value(2.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("field", "xxx")
+ .field("num", 200.0)
+ .startArray("multi_num").value(2.0).value(3.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("field", "yyy")
+ .field("num", 500.0)
+ .startArray("multi_num").value(5.0).value(6.0f).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("stats1").keyField("field").valueField("num"))
+ .addFacet(termsStatsFacet("stats2").keyField("field").valueField("multi_num"))
+ .addFacet(termsStatsFacet("stats3").keyField("field").valueField("num").order(TermsStatsFacet.ComparatorType.COUNT))
+ .addFacet(termsStatsFacet("stats4").keyField("field").valueField("multi_num").order(TermsStatsFacet.ComparatorType.COUNT))
+ .addFacet(termsStatsFacet("stats5").keyField("field").valueField("num").order(TermsStatsFacet.ComparatorType.TOTAL))
+ .addFacet(termsStatsFacet("stats6").keyField("field").valueField("multi_num").order(TermsStatsFacet.ComparatorType.TOTAL))
+
+ .addFacet(termsStatsFacet("stats7").keyField("field").valueField("num").allTerms())
+ .addFacet(termsStatsFacet("stats8").keyField("field").valueField("multi_num").allTerms())
+ .addFacet(termsStatsFacet("stats9").keyField("field").valueField("num").order(TermsStatsFacet.ComparatorType.COUNT).allTerms())
+ .addFacet(termsStatsFacet("stats10").keyField("field").valueField("multi_num").order(TermsStatsFacet.ComparatorType.COUNT).allTerms())
+ .addFacet(termsStatsFacet("stats11").keyField("field").valueField("num").order(TermsStatsFacet.ComparatorType.TOTAL).allTerms())
+ .addFacet(termsStatsFacet("stats12").keyField("field").valueField("multi_num").order(TermsStatsFacet.ComparatorType.TOTAL).allTerms())
+
+ .addFacet(termsStatsFacet("stats13").keyField("field").valueScript("doc['num'].value * 2"))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
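+ // xxx groups num values 100 and 200 (total 300), yyy groups 500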
+ TermsStatsFacet facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotalCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(100d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(200d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(300d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotalCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(500d, 0.00001d));
+
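+ // multi_num totals per term: xxx = 1 + 2 + 2 + 3 = 8, yyy = 5 + 6 = 11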
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(1d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(3d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(8d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(5d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(6d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(11d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats3");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(300d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(500d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats4");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(8d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(11d, 0.00001d));
+
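+ // ordering by TOTAL puts yyy (total 500) ahead of xxx (total 300)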
+ facet = searchResponse.getFacets().facet("stats5");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(300d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats6");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(11d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(8d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats7");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(300d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(500d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats8");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(8d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(11d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats9");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(300d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(500d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats10");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(8d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(11d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats11");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(300d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats12");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(11d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(8d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats13");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("xxx"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(600d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("yyy"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(1000d, 0.00001d));
+ }
+ }
+
+ @Test
+ public void testNumericTermsStatsFacets() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("lField").field("type", "long").endObject()
+ .startObject("dField").field("type", "double").endObject()
+ .startObject("num").field("type", "float").endObject()
+ .startObject("multi_num").field("type", "integer").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("lField", 100l)
+ .field("dField", 100.1d)
+ .field("num", 100.0)
+ .startArray("multi_num").value(1.0).value(2.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("lField", 100l)
+ .field("dField", 100.1d)
+ .field("num", 200.0)
+ .startArray("multi_num").value(2.0).value(3.0f).endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("lField", 200l)
+ .field("dField", 200.2d)
+ .field("num", 500.0)
+ .startArray("multi_num").value(5.0).value(6.0f).endArray()
+ .endObject()).execute().actionGet();
+ flushAndRefresh();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("stats1").keyField("lField").valueField("num"))
+ .addFacet(termsStatsFacet("stats2").keyField("dField").valueField("num"))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ TermsStatsFacet facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("100"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(100d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(200d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(300d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("200"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(500d, 0.00001d));
+
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getEntries().size(), equalTo(2));
+ assertThat(facet.getEntries().get(0).getTerm().string(), equalTo("100.1"));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(0).getMin(), closeTo(100d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getMax(), closeTo(200d, 0.00001d));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(300d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTerm().string(), equalTo("200.2"));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ assertThat(facet.getEntries().get(1).getMin(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getMax(), closeTo(500d, 0.00001d));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(500d, 0.00001d));
+ }
+ }
+
+ @Test
+ public void testTermsStatsFacets2() throws Exception {
+ // TODO: facet shouldn't fail when faceted field is mapped dynamically
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num").field("type", "float").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 20; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("num", i % 10).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("stats1").keyField("num").valueScript("doc.score").order(TermsStatsFacet.ComparatorType.COUNT))
+ .addFacet(termsStatsFacet("stats2").keyField("num").valueScript("doc.score").order(TermsStatsFacet.ComparatorType.TOTAL))
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ TermsStatsFacet facet = searchResponse.getFacets().facet("stats1");
+ assertThat(facet.getEntries().size(), equalTo(10));
+
+ facet = searchResponse.getFacets().facet("stats2");
+ assertThat(facet.getEntries().size(), equalTo(10));
+ }
+ }
+
+ @Test
+ public void testQueryFacet() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 20; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("num", i % 10).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(queryFacet("query").query(termQuery("num", 1)))
+ .execute().actionGet();
+
+ QueryFacet facet = searchResponse.getFacets().facet("query");
+ assertThat(facet.getCount(), equalTo(2l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(queryFacet("query").query(termQuery("num", 1)).global(true))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("query");
+ assertThat(facet.getCount(), equalTo(2l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(queryFacet("query").query(termsQuery("num", new long[]{1, 2})).facetFilter(termFilter("num", 1)).global(true))
+ .execute().actionGet();
+
+ facet = searchResponse.getFacets().facet("query");
+ assertThat(facet.getCount(), equalTo(2l));
+ }
+ }
+
+ @Test // #3479: Null pointer exception for POST mode facets if facet_filter accepts no documents
+ public void testFilterFacetWithFacetFilterPostMode() throws IOException {
+ createIndex("test");
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("field", "xxx")
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (int i = 0; i < numberOfRuns(); i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(
+ termsFacet("facet1").field("field").mode(FacetBuilder.Mode.POST).facetFilter(termFilter("tag", "doesnotexist"))
+ )
+ .execute().actionGet();
+
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ TermsFacet facet = searchResponse.getFacets().facet("facet1");
+ assertThat(facet.getName(), equalTo("facet1"));
+ assertThat(facet.getEntries().size(), equalTo(0));
+ }
+ }
+
+ private long utcTimeInMillis(String time) {
+ return timeInMillis(time, DateTimeZone.UTC);
+ }
+
+ private long timeInMillis(String time, DateTimeZone zone) {
+ return ISODateTimeFormat.dateOptionalTimeParser().withZone(zone).parseMillis(time);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/facet/terms/ShardSizeTermsFacetTests.java b/src/test/java/org/elasticsearch/search/facet/terms/ShardSizeTermsFacetTests.java
new file mode 100644
index 0000000..b06e640
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/terms/ShardSizeTermsFacetTests.java
@@ -0,0 +1,423 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.terms;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.facet.Facets;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.facet.FacetBuilders.termsFacet;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.SUITE)
+public class ShardSizeTermsFacetTests extends ElasticsearchIntegrationTest {
+
+ /**
+     * To properly test the effect/functionality of shard_size, we need to force having 2 shards and also
+     * control the routing such that certain documents will end up on each shard. Using the "djb" routing hash
+     * and ignoring the doc type when hashing ensures that docs with routing value "1" will end up in a
+     * different shard than docs with routing value "2".
+ */
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ .put("cluster.routing.operation.hash.type", "djb")
+ .put("cluster.routing.operation.use_type", "false")
+ .build();
+ }
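+
+    /*
+     * A minimal sketch of the "djb" hash named in the settings above, assuming the
+     * standard djb2 variant (seed 5381, hash = hash * 33 + c); the exact formula
+     * Elasticsearch applies is an assumption here, and this helper is illustrative
+     * only -- the tests never call it. With this variant, routing "1" and routing
+     * "2" hash to shards 0 and 1 respectively when there are two shards, which is
+     * what lets indexData() place a known set of documents on each shard.
+     */
+    @SuppressWarnings("unused")
+    private static int djbShardSketch(String routing, int numShards) {
+        long hash = 5381;                                           // djb2 seed
+        for (int i = 0; i < routing.length(); i++) {
+            hash = ((hash << 5) + hash) + routing.charAt(i);        // hash * 33 + c
+        }
+        return (int) ((hash % numShards + numShards) % numShards);  // non-negative modulo
+    }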
+
+ @Test
+ public void noShardSize_string() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<String, Integer> expected = ImmutableMap.<String, Integer>builder()
+ .put("1", 8)
+ .put("3", 8)
+ .put("2", 4)
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<String, Integer> expected = ImmutableMap.<String, Integer>builder()
+ .put("1", 8)
+ .put("3", 8)
+ .put("2", 5) // <-- count is now fixed
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<String, Integer> expected = ImmutableMap.<String, Integer>builder()
+ .put("1", 5)
+ .put("2", 4)
+                .put("3", 3) // only one shard is searched, so counts are exact
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string_withExecutionHintMap() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).executionHint("map").order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<String, Integer> expected = ImmutableMap.<String, Integer>builder()
+ .put("1", 8)
+ .put("3", 8)
+ .put("2", 5) // <-- count is now fixed
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string_withExecutionHintMap_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).executionHint("map").order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<String, Integer> expected = ImmutableMap.<String, Integer>builder()
+ .put("1", 5)
+ .put("2", 4)
+                .put("3", 3) // only one shard is searched, so counts are exact
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void noShardSize_long() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Integer> expected = ImmutableMap.<Integer, Integer>builder()
+ .put(1, 8)
+ .put(3, 8)
+ .put(2, 4)
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Integer> expected = ImmutableMap.<Integer, Integer>builder()
+ .put(1, 8)
+ .put(3, 8)
+ .put(2, 5) // <-- count is now fixed
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Integer> expected = ImmutableMap.<Integer, Integer>builder()
+ .put(1, 5)
+ .put(2, 4)
+ .put(3, 3)
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void noShardSize_double() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Integer> expected = ImmutableMap.<Integer, Integer>builder()
+ .put(1, 8)
+ .put(3, 8)
+ .put(2, 4)
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Integer> expected = ImmutableMap.<Integer, Integer>builder()
+ .put(1, 8)
+ .put(3, 8)
+ .put(2, 5) // <-- count is now fixed
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("keys").field("key").size(3).shardSize(5).order(TermsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsFacet terms = facets.facet("keys");
+ List<? extends TermsFacet.Entry> entries = terms.getEntries();
+ assertThat(entries.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Integer> expected = ImmutableMap.<Integer, Integer>builder()
+ .put(1, 5)
+ .put(2, 4)
+ .put(3, 3)
+ .build();
+ for (TermsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ private void indexData() throws Exception {
+
+ /*
+
+
+ || || size = 3, shard_size = 5 || shard_size = size = 3 ||
+ ||==========||==================================================||===============================================||
+ || shard 1: || "1" - 5 | "2" - 4 | "3" - 3 | "4" - 2 | "5" - 1 || "1" - 5 | "3" - 3 | "2" - 4 ||
+ ||----------||--------------------------------------------------||-----------------------------------------------||
+ || shard 2: || "1" - 3 | "2" - 1 | "3" - 5 | "4" - 2 | "5" - 1 || "1" - 3 | "3" - 5 | "4" - 2 ||
+ ||----------||--------------------------------------------------||-----------------------------------------------||
+ || reduced: || "1" - 8 | "2" - 5 | "3" - 8 | "4" - 4 | "5" - 2 || ||
+ || || || "1" - 8, "3" - 8, "2" - 4 <= WRONG ||
+ || || "1" - 8 | "3" - 8 | "2" - 5 <= CORRECT || ||
+
+
+ */
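+        // Reading the table above: with size = 3 each shard returns only its local
+        // top 3 terms, so shard 2 never reports "2" (its count of 1 ranks 4th there)
+        // and the reduced count for "2" comes out as 4 instead of the true 5. With
+        // shard_size = 5 both shards return all five terms, so the reduce phase
+        // sees 4 + 1 = 5 for "2".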
+
+
+ indexDoc("1", "1", 5);
+ indexDoc("1", "2", 4);
+ indexDoc("1", "3", 3);
+ indexDoc("1", "4", 2);
+ indexDoc("1", "5", 1);
+
+ // total docs in shard "1" = 15
+
+ indexDoc("2", "1", 3);
+ indexDoc("2", "2", 1);
+ indexDoc("2", "3", 5);
+ indexDoc("2", "4", 2);
+ indexDoc("2", "5", 1);
+
+ // total docs in shard "2" = 12
+
+ client().admin().indices().prepareFlush("idx").execute().actionGet();
+ client().admin().indices().prepareRefresh("idx").execute().actionGet();
+
+ long totalOnOne = client().prepareSearch("idx").setTypes("type").setRouting("1").setQuery(matchAllQuery()).execute().actionGet().getHits().getTotalHits();
+ assertThat(totalOnOne, is(15l));
+ long totalOnTwo = client().prepareSearch("idx").setTypes("type").setRouting("2").setQuery(matchAllQuery()).execute().actionGet().getHits().getTotalHits();
+ assertThat(totalOnTwo, is(12l));
+ }
+
+ private void indexDoc(String shard, String key, int times) throws Exception {
+ for (int i = 0; i < times; i++) {
+ client().prepareIndex("idx", "type").setRouting(shard).setCreate(true).setSource(jsonBuilder()
+ .startObject()
+ .field("key", key)
+ .endObject()).execute().actionGet();
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/facet/terms/UnmappedFieldsTermsFacetsTests.java b/src/test/java/org/elasticsearch/search/facet/terms/UnmappedFieldsTermsFacetsTests.java
new file mode 100644
index 0000000..911be14
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/terms/UnmappedFieldsTermsFacetsTests.java
@@ -0,0 +1,387 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.terms;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.facet.FacetBuilders.termsFacet;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+public class UnmappedFieldsTermsFacetsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", numberOfShards())
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
+ protected int numberOfShards() {
+ return 5;
+ }
+
+ /**
+     * Tests the terms facet when faceting on an unmapped field.
+ */
+ @Test
+ public void testUnmappedField() throws Exception {
+ createIndex("idx");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("idx", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("mapped", "" + i)
+ .endObject()).execute().actionGet();
+ }
+
+ flushAndRefresh();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("mapped").field("mapped").size(10))
+ .addFacet(termsFacet("unmapped_bool").field("unmapped_bool").size(10))
+ .addFacet(termsFacet("unmapped_str").field("unmapped_str").size(10))
+ .addFacet(termsFacet("unmapped_byte").field("unmapped_byte").size(10))
+ .addFacet(termsFacet("unmapped_short").field("unmapped_short").size(10))
+ .addFacet(termsFacet("unmapped_int").field("unmapped_int").size(10))
+ .addFacet(termsFacet("unmapped_long").field("unmapped_long").size(10))
+ .addFacet(termsFacet("unmapped_float").field("unmapped_float").size(10))
+ .addFacet(termsFacet("unmapped_double").field("unmapped_double").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ // all values should be returned for the mapped field
+ TermsFacet facet = searchResponse.getFacets().facet("mapped");
+ assertThat(facet.getName(), equalTo("mapped"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getMissingCount(), is(0l));
+
+ // no values should be returned for the unmapped field (all docs are missing)
+
+ facet = searchResponse.getFacets().facet("unmapped_str");
+ assertThat(facet.getName(), equalTo("unmapped_str"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_bool");
+ assertThat(facet.getName(), equalTo("unmapped_bool"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_byte");
+ assertThat(facet.getName(), equalTo("unmapped_byte"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_short");
+ assertThat(facet.getName(), equalTo("unmapped_short"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_int");
+ assertThat(facet.getName(), equalTo("unmapped_int"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_long");
+ assertThat(facet.getName(), equalTo("unmapped_long"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_float");
+ assertThat(facet.getName(), equalTo("unmapped_float"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("unmapped_double");
+ assertThat(facet.getName(), equalTo("unmapped_double"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ }
+
+
+ /**
+     * Tests the terms facet when faceting on a partially unmapped field. An example of this scenario is
+     * searching across multiple indices, where the field is mapped in some indices and unmapped in others.
+ */
+ @Test
+ public void testPartiallyUnmappedField() throws ElasticsearchException, IOException {
+ client().admin().indices().prepareCreate("mapped_idx")
+ .setSettings(indexSettings())
+ .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("partially_mapped_byte").field("type", "byte").endObject()
+ .startObject("partially_mapped_short").field("type", "short").endObject()
+ .startObject("partially_mapped_int").field("type", "integer").endObject()
+ .startObject("partially_mapped_long").field("type", "long").endObject()
+ .startObject("partially_mapped_float").field("type", "float").endObject()
+ .startObject("partially_mapped_double").field("type", "double").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ createIndex("unmapped_idx");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("mapped_idx", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("mapped", "" + i)
+ .field("partially_mapped_str", "" + i)
+ .field("partially_mapped_bool", i % 2 == 0)
+ .field("partially_mapped_byte", i)
+ .field("partially_mapped_short", i)
+ .field("partially_mapped_int", i)
+ .field("partially_mapped_long", i)
+ .field("partially_mapped_float", i)
+ .field("partially_mapped_double", i)
+ .endObject()).execute().actionGet();
+ }
+
+ for (int i = 10; i < 20; i++) {
+ client().prepareIndex("unmapped_idx", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("mapped", "" + i)
+ .endObject()).execute().actionGet();
+ }
+
+
+ flushAndRefresh();
+
+ SearchResponse searchResponse = client().prepareSearch("mapped_idx", "unmapped_idx")
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("mapped").field("mapped").size(10))
+ .addFacet(termsFacet("partially_mapped_str").field("partially_mapped_str").size(10))
+ .addFacet(termsFacet("partially_mapped_bool").field("partially_mapped_bool").size(10))
+ .addFacet(termsFacet("partially_mapped_byte").field("partially_mapped_byte").size(10))
+ .addFacet(termsFacet("partially_mapped_short").field("partially_mapped_short").size(10))
+ .addFacet(termsFacet("partially_mapped_int").field("partially_mapped_int").size(10))
+ .addFacet(termsFacet("partially_mapped_long").field("partially_mapped_long").size(10))
+ .addFacet(termsFacet("partially_mapped_float").field("partially_mapped_float").size(10))
+ .addFacet(termsFacet("partially_mapped_double").field("partially_mapped_double").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+
+ // all values should be returned for the mapped field
+ TermsFacet facet = searchResponse.getFacets().facet("mapped");
+ assertThat(facet.getName(), equalTo("mapped"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(20l));
+ assertThat(facet.getOtherCount(), is(10l));
+ assertThat(facet.getMissingCount(), is(0l));
+
+ // only the values of the mapped index should be returned for the partially mapped field (all docs of
+ // the unmapped index should be missing)
+
+ facet = searchResponse.getFacets().facet("partially_mapped_str");
+ assertThat(facet.getName(), equalTo("partially_mapped_str"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("partially_mapped_bool");
+ assertThat(facet.getName(), equalTo("partially_mapped_bool"));
+ ArrayList<String> terms = new ArrayList<String>();
+ for (TermsFacet.Entry entry : facet.getEntries()) {
+ terms.add(entry.getTerm().toString());
+ }
+        assertThat("unexpected number of bool terms: " + terms, facet.getEntries().size(), is(2));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("partially_mapped_byte");
+ assertThat(facet.getName(), equalTo("partially_mapped_byte"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("partially_mapped_short");
+ assertThat(facet.getName(), equalTo("partially_mapped_short"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("partially_mapped_int");
+ assertThat(facet.getName(), equalTo("partially_mapped_int"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("partially_mapped_long");
+ assertThat(facet.getName(), equalTo("partially_mapped_long"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("partially_mapped_float");
+ assertThat(facet.getName(), equalTo("partially_mapped_float"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+        facet = searchResponse.getFacets().facet("partially_mapped_double");
+        assertThat(facet.getName(), equalTo("partially_mapped_double"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getOtherCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+ }
+
+ @Test
+ public void testMappedYetMissingField() throws IOException {
+ client().admin().indices().prepareCreate("idx")
+ .setSettings(indexSettings())
+ .addMapping("type", jsonBuilder().startObject()
+ .field("type").startObject()
+ .field("properties").startObject()
+ .field("string").startObject().field("type", "string").endObject()
+ .field("long").startObject().field("type", "long").endObject()
+ .field("double").startObject().field("type", "double").endObject()
+ .endObject()
+                        .endObject()
+                        .endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("idx", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("foo", "bar")
+ .endObject()).execute().actionGet();
+ }
+ flushAndRefresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("string").field("string").size(10))
+ .addFacet(termsFacet("long").field("long").size(10))
+ .addFacet(termsFacet("double").field("double").size(10))
+ .execute().actionGet();
+
+ TermsFacet facet = searchResponse.getFacets().facet("string");
+ assertThat(facet.getName(), equalTo("string"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("long");
+ assertThat(facet.getName(), equalTo("long"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+
+ facet = searchResponse.getFacets().facet("double");
+ assertThat(facet.getName(), equalTo("double"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+ }
+
+ /**
+     * Tests the terms facet when faceting on multiple fields:
+     * case 1: some, but not all, of the fields are mapped
+     * case 2: all of the fields are unmapped
+ */
+ @Test
+ public void testMultiFields() throws Exception {
+ createIndex("idx");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("idx", "type", "" + i).setSource(jsonBuilder().startObject()
+ .field("mapped_str", "" + i)
+ .field("mapped_long", i)
+ .field("mapped_double", i)
+ .endObject()).execute().actionGet();
+ }
+
+ flushAndRefresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addFacet(termsFacet("string").fields("mapped_str", "unmapped").size(10))
+ .addFacet(termsFacet("long").fields("mapped_long", "unmapped").size(10))
+ .addFacet(termsFacet("double").fields("mapped_double", "unmapped").size(10))
+ .addFacet(termsFacet("all_unmapped").fields("unmapped", "unmapped_1").size(10))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ TermsFacet facet = searchResponse.getFacets().facet("string");
+ assertThat(facet.getName(), equalTo("string"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getMissingCount(), is(0l));
+
+ facet = searchResponse.getFacets().facet("long");
+ assertThat(facet.getName(), equalTo("long"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getMissingCount(), is(0l));
+
+ facet = searchResponse.getFacets().facet("double");
+ assertThat(facet.getName(), equalTo("double"));
+ assertThat(facet.getEntries().size(), is(10));
+ assertThat(facet.getTotalCount(), is(10l));
+ assertThat(facet.getMissingCount(), is(0l));
+
+ facet = searchResponse.getFacets().facet("all_unmapped");
+ assertThat(facet.getName(), equalTo("all_unmapped"));
+ assertThat(facet.getEntries().size(), is(0));
+ assertThat(facet.getTotalCount(), is(0l));
+ assertThat(facet.getMissingCount(), is(10l));
+ }
+
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/facet/termsstats/ShardSizeTermsStatsFacetTests.java b/src/test/java/org/elasticsearch/search/facet/termsstats/ShardSizeTermsStatsFacetTests.java
new file mode 100644
index 0000000..3b2a34e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/facet/termsstats/ShardSizeTermsStatsFacetTests.java
@@ -0,0 +1,548 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.facet.termsstats;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.facet.Facets;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.facet.FacetBuilders.termsStatsFacet;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.SUITE)
+public class ShardSizeTermsStatsFacetTests extends ElasticsearchIntegrationTest {
+
+ /**
+     * To properly test the effect/functionality of shard_size, we need to force having 2 shards and also
+     * control the routing such that certain documents will end up on each shard. Using the "djb" routing hash
+     * and ignoring the doc type when hashing ensures that docs with routing value "1" will end up in a
+     * different shard than docs with routing value "2".
+ */
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ .put("cluster.routing.operation.hash.type", "djb")
+ .put("cluster.routing.operation.use_type", "false")
+ .build();
+ }
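+
+    // Same two-shard, "djb"-routed setup as ShardSizeTermsFacetTests uses: routing
+    // values "1" and "2" hash to different shards, so indexData() below can place a
+    // known distribution of keys and values on each shard.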
+
+ @Test
+ public void noShardSize_string() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 4l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void noShardSize_string_allTerms() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(0).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(5));
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 5l)
+ .put("4", 4l)
+ .put("5", 2l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string_allTerms() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(0).shardSize(3).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(5));
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 5l)
+ .put("4", 4l)
+ .put("5", 2l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).shardSize(5).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 5l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).shardSize(5).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 5l)
+ .put("2", 4l)
+ .put("3", 3l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTerm().string())));
+ }
+ }
+
+ @Test
+ public void noShardSize_long() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 4l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void noShardSize_long_allTerms() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(0).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(5));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .put(4, 4l)
+ .put(5, 2l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long_allTerms() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(0).shardSize(3).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(5));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .put(4, 4l)
+ .put(5, 2l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).shardSize(5).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=long")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).shardSize(5).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 5l)
+ .put(2, 4l)
+ .put(3, 3l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void noShardSize_double() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 4l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void noShardSize_double_allTerms() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(0).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(5));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .put(4, 4l)
+ .put(5, 2l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double_allTerms() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(0).shardSize(3).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(5));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .put(4, 4l)
+ .put(5, 2l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).shardSize(5).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double_singleShard() throws Exception {
+
+ client().admin().indices().prepareCreate("idx")
+ .addMapping("type", "key", "type=double")
+ .execute().actionGet();
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
+ .setQuery(matchAllQuery())
+ .addFacet(termsStatsFacet("keys").keyField("key").valueField("value").size(3).shardSize(5).order(TermsStatsFacet.ComparatorType.COUNT))
+ .execute().actionGet();
+
+ Facets facets = response.getFacets();
+ TermsStatsFacet facet = facets.facet("keys");
+ List<? extends TermsStatsFacet.Entry> entries = facet.getEntries();
+ assertThat(entries.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 5l)
+ .put(2, 4l)
+ .put(3, 3l)
+ .build();
+ for (TermsStatsFacet.Entry entry : entries) {
+ assertThat(entry.getCount(), equalTo(expected.get(entry.getTermAsNumber().intValue())));
+ }
+ }
+
+ private void indexData() throws Exception {
+
+        /*
+         || || size = 3, shard_size = 5 || shard_size = size = 3 ||
+         ||==========||==================================================||===============================================||
+         || shard 1: || "1" - 5 | "2" - 4 | "3" - 3 | "4" - 2 | "5" - 1 || "1" - 5 | "3" - 3 | "2" - 4 ||
+         ||----------||--------------------------------------------------||-----------------------------------------------||
+         || shard 2: || "1" - 3 | "2" - 1 | "3" - 5 | "4" - 2 | "5" - 1 || "1" - 3 | "3" - 5 | "4" - 2 ||
+         ||----------||--------------------------------------------------||-----------------------------------------------||
+         || reduced: || "1" - 8 | "2" - 5 | "3" - 8 | "4" - 4 | "5" - 2 || ||
+         || top 3:   || "1" - 8 | "3" - 8 | "2" - 5  <= CORRECT          || "1" - 8, "3" - 8, "2" - 4  <= WRONG           ||
+         */
+
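+        // Why shard_size = size is lossy here (assuming each shard returns only its top
+        // `shard_size` terms by count): shard 2's top 3 are "3" - 5, "1" - 3 and "4" - 2, so its
+        // single "2" doc never reaches the reduce phase and "2" ends up with 4 instead of 5.
+        // Raising shard_size to 5 lets both shards report "2", restoring the exact count.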
+
+ indexDoc("1", "1", 5);
+ indexDoc("1", "2", 4);
+ indexDoc("1", "3", 3);
+ indexDoc("1", "4", 2);
+ indexDoc("1", "5", 1);
+
+ // total docs in shard "1" = 15
+
+ indexDoc("2", "1", 3);
+ indexDoc("2", "2", 1);
+ indexDoc("2", "3", 5);
+ indexDoc("2", "4", 2);
+ indexDoc("2", "5", 1);
+
+ // total docs in shard "2" = 12
+
+ client().admin().indices().prepareFlush("idx").execute().actionGet();
+ client().admin().indices().prepareRefresh("idx").execute().actionGet();
+
+ long totalOnOne = client().prepareSearch("idx").setTypes("type").setRouting("1").setQuery(matchAllQuery()).execute().actionGet().getHits().getTotalHits();
+ assertThat(totalOnOne, is(15l));
+ long totalOnTwo = client().prepareSearch("idx").setTypes("type").setRouting("2").setQuery(matchAllQuery()).execute().actionGet().getHits().getTotalHits();
+ assertThat(totalOnTwo, is(12l));
+ }
+
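+    /**
+     * Indexes {@code times} documents with the given key, all routed to the given shard. Every
+     * document carries {@code value = 1}, so the facet count of a term equals the number of
+     * documents indexed for that key.
+     */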
+ private void indexDoc(String shard, String key, int times) throws Exception {
+ for (int i = 0; i < times; i++) {
+ client().prepareIndex("idx", "type").setRouting(shard).setCreate(true).setSource(jsonBuilder()
+ .startObject()
+ .field("key", key)
+ .field("value", 1)
+ .endObject()).execute().actionGet();
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/fields/SearchFieldsTests.java b/src/test/java/org/elasticsearch/search/fields/SearchFieldsTests.java
new file mode 100644
index 0000000..c397b8a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/fields/SearchFieldsTests.java
@@ -0,0 +1,486 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fields;
+
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Base64;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.client.Requests.refreshRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SearchFieldsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", 1) // why just one?
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
+ @Test
+ public void testStoredFields() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .startObject("field3").field("type", "string").field("store", "yes").endObject()
+ .endObject().endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .field("field3", "value3")
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field1").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1"));
+
+ // field2 is not stored, check that it gets extracted from source
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field2").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field2").value().toString(), equalTo("value2"));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field3").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
+
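+        // "*" matches stored fields only: field2 (store: no) is absent and _source is not
+        // loaded unless it is requested explicitly, as the next two searches verify.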
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).source(), nullValue());
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(2));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*").addField("_source").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).source(), notNullValue());
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(2));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
+ }
+
+ @Test
+ public void testScriptDocAndFields() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num1").field("type", "double").field("store", "yes").endObject()
+ .endObject().endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).field("date", "1970-01-01T00:00:00").endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 2.0f).field("date", "1970-01-01T00:00:25").endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "3")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).field("date", "1970-01-01T00:02:00").endObject())
+ .execute().actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("running doc['num1'].value");
+ SearchResponse response = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", "doc['num1'].value")
+ .addScriptField("sNum1_field", "_fields['num1'].value")
+ .addScriptField("date1", "doc['date'].date.millis")
+ .execute().actionGet();
+
+ assertNoFailures(response);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().getAt(0).isSourceEmpty(), equalTo(true));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(1.0));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1_field").values().get(0), equalTo(1.0));
+ assertThat((Long) response.getHits().getAt(0).fields().get("date1").values().get(0), equalTo(0l));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1_field").values().get(0), equalTo(2.0));
+ assertThat((Long) response.getHits().getAt(1).fields().get("date1").values().get(0), equalTo(25000l));
+ assertThat(response.getHits().getAt(2).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(3.0));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1_field").values().get(0), equalTo(3.0));
+ assertThat((Long) response.getHits().getAt(2).fields().get("date1").values().get(0), equalTo(120000l));
+
+ logger.info("running doc['num1'].value * factor");
+ Map<String, Object> params = MapBuilder.<String, Object>newMapBuilder().put("factor", 2.0).map();
+ response = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", "doc['num1'].value * factor", params)
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(4.0));
+ assertThat(response.getHits().getAt(2).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(6.0));
+ }
+
+ @Test
+ public void testScriptFieldUsingSource() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject()
+ .startObject("obj1").field("test", "something").endObject()
+ .startObject("obj2").startArray("arr2").value("arr_value1").value("arr_value2").endArray().endObject()
+ .startArray("arr3").startObject().field("arr3_field1", "arr3_value1").endObject().endArray()
+ .endObject())
+ .execute().actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ SearchResponse response = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("s_obj1", "_source.obj1")
+ .addScriptField("s_obj1_test", "_source.obj1.test")
+ .addScriptField("s_obj2", "_source.obj2")
+ .addScriptField("s_obj2_arr2", "_source.obj2.arr2")
+ .addScriptField("s_arr3", "_source.arr3")
+ .execute().actionGet();
+
+ assertThat("Failures " + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0));
+
+ assertThat(response.getHits().getAt(0).field("s_obj1_test").value().toString(), equalTo("something"));
+
+ Map<String, Object> sObj1 = response.getHits().getAt(0).field("s_obj1").value();
+ assertThat(sObj1.get("test").toString(), equalTo("something"));
+ assertThat(response.getHits().getAt(0).field("s_obj1_test").value().toString(), equalTo("something"));
+
+ Map<String, Object> sObj2 = response.getHits().getAt(0).field("s_obj2").value();
+ List sObj2Arr2 = (List) sObj2.get("arr2");
+ assertThat(sObj2Arr2.size(), equalTo(2));
+ assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1"));
+ assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2"));
+
+ sObj2Arr2 = (List) response.getHits().getAt(0).field("s_obj2_arr2").value();
+ assertThat(sObj2Arr2.size(), equalTo(2));
+ assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1"));
+ assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2"));
+
+        List sArr3 = (List) response.getHits().getAt(0).field("s_arr3").value();
+        assertThat(((Map) sArr3.get(0)).get("arr3_field1").toString(), equalTo("arr3_value1"));
+ }
+
+ @Test
+ public void testPartialFields() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+
+ client().prepareIndex("test", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startObject("obj1")
+ .startArray("arr1")
+ .startObject().startObject("obj2").field("field2", "value21").endObject().endObject()
+ .startObject().startObject("obj2").field("field2", "value22").endObject().endObject()
+ .endArray()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
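+        // partial1 keeps only the paths matching its include pattern; partial2 strips everything
+        // under its exclude pattern, which the assertions below verify field by field.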
+ SearchResponse response = client().prepareSearch("test")
+ .addPartialField("partial1", "obj1.arr1.*", null)
+ .addPartialField("partial2", null, "obj1")
+ .execute().actionGet();
+ assertThat("Failures " + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0));
+
+ Map<String, Object> partial1 = response.getHits().getAt(0).field("partial1").value();
+ assertThat(partial1, notNullValue());
+ assertThat(partial1.containsKey("field1"), equalTo(false));
+ assertThat(partial1.containsKey("obj1"), equalTo(true));
+ assertThat(((Map) partial1.get("obj1")).get("arr1"), instanceOf(List.class));
+
+ Map<String, Object> partial2 = response.getHits().getAt(0).field("partial2").value();
+ assertThat(partial2, notNullValue());
+ assertThat(partial2.containsKey("obj1"), equalTo(false));
+ assertThat(partial2.containsKey("field1"), equalTo(true));
+ }
+
+ @Test
+ public void testStoredFieldsWithoutSource() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("byte_field").field("type", "byte").field("store", "yes").endObject()
+ .startObject("short_field").field("type", "short").field("store", "yes").endObject()
+ .startObject("integer_field").field("type", "integer").field("store", "yes").endObject()
+ .startObject("long_field").field("type", "long").field("store", "yes").endObject()
+ .startObject("float_field").field("type", "float").field("store", "yes").endObject()
+ .startObject("double_field").field("type", "double").field("store", "yes").endObject()
+ .startObject("date_field").field("type", "date").field("store", "yes").endObject()
+ .startObject("boolean_field").field("type", "boolean").field("store", "yes").endObject()
+ .startObject("binary_field").field("type", "binary").field("store", "yes").endObject()
+ .endObject().endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("byte_field", (byte) 1)
+ .field("short_field", (short) 2)
+ .field("integer_field", 3)
+ .field("long_field", 4l)
+ .field("float_field", 5.0f)
+ .field("double_field", 6.0d)
+ .field("date_field", Joda.forPattern("dateOptionalTime").printer().print(new DateTime(2012, 3, 22, 0, 0, DateTimeZone.UTC)))
+ .field("boolean_field", true)
+ .field("binary_field", Base64.encodeBytes("testing text".getBytes("UTF8")))
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .addField("byte_field")
+ .addField("short_field")
+ .addField("integer_field")
+ .addField("long_field")
+ .addField("float_field")
+ .addField("double_field")
+ .addField("date_field")
+ .addField("boolean_field")
+ .addField("binary_field")
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(9));
+
+
+ assertThat(searchResponse.getHits().getAt(0).fields().get("byte_field").value().toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("short_field").value().toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("integer_field").value(), equalTo((Object) 3));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("long_field").value(), equalTo((Object) 4l));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("float_field").value(), equalTo((Object) 5.0f));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("double_field").value(), equalTo((Object) 6.0d));
+ String dateTime = Joda.forPattern("dateOptionalTime").printer().print(new DateTime(2012, 3, 22, 0, 0, DateTimeZone.UTC));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("date_field").value(), equalTo((Object) dateTime));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value(), equalTo((Object) Boolean.TRUE));
+ assertThat(((BytesReference) searchResponse.getHits().getAt(0).fields().get("binary_field").value()).toBytesArray(), equalTo((BytesReference) new BytesArray("testing text".getBytes("UTF8"))));
+
+ }
+
+ @Test
+ public void testSearchFields_metaData() throws Exception {
+ client().prepareIndex("my-index", "my-type1", "1")
+ .setRouting("1")
+ .setSource(jsonBuilder().startObject().field("field1", "value").endObject())
+ .setRefresh(true)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch("my-index")
+ .setTypes("my-type1")
+ .addField("field1").addField("_routing")
+ .get();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).field("field1").isMetadataField(), equalTo(false));
+ assertThat(searchResponse.getHits().getAt(0).field("field1").getValue().toString(), equalTo("value"));
+ assertThat(searchResponse.getHits().getAt(0).field("_routing").isMetadataField(), equalTo(true));
+ assertThat(searchResponse.getHits().getAt(0).field("_routing").getValue().toString(), equalTo("1"));
+ }
+
+ @Test
+ public void testSearchFields_nonLeafField() throws Exception {
+ client().prepareIndex("my-index", "my-type1", "1")
+ .setSource(jsonBuilder().startObject().startObject("field1").field("field2", "value1").endObject().endObject())
+ .setRefresh(true)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch("my-index").setTypes("my-type1").addField("field1").get();
+ assertThat(searchResponse.getShardFailures().length, equalTo(1));
+ assertThat(searchResponse.getShardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(searchResponse.getShardFailures()[0].reason(), containsString("field [field1] isn't a leaf field"));
+ }
+
+ @Test
+ public void testGetFields_complexField() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+ .addMapping("my-type2", jsonBuilder().startObject().startObject("my-type2").startObject("properties")
+ .startObject("field1").field("type", "object")
+ .startObject("field2").field("type", "object")
+ .startObject("field3").field("type", "object")
+ .startObject("field4").field("type", "string").field("store", "yes")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ BytesReference source = jsonBuilder().startObject()
+ .startArray("field1")
+ .startObject()
+ .startObject("field2")
+ .startArray("field3")
+ .startObject()
+ .field("field4", "value1")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .startObject()
+ .startObject("field2")
+ .startArray("field3")
+ .startObject()
+ .field("field4", "value2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endArray()
+ .endObject().bytes();
+
+ client().prepareIndex("my-index", "my-type1", "1").setSource(source).get();
+ client().prepareIndex("my-index", "my-type2", "1").setRefresh(true).setSource(source).get();
+
+
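+        // Requesting the full dotted path flattens the nested arrays: both leaf values come back
+        // in document order, for the dynamically mapped my-type1 as well as the explicitly
+        // mapped my-type2.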
+ String field = "field1.field2.field3.field4";
+ SearchResponse searchResponse = client().prepareSearch("my-index").setTypes("my-type1").addField(field).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).field(field).isMetadataField(), equalTo(false));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().size(), equalTo(2));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2"));
+
+ searchResponse = client().prepareSearch("my-index").setTypes("my-type2").addField(field).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).field(field).isMetadataField(), equalTo(false));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().size(), equalTo(2));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2"));
+ }
+
+ @Test
+ public void testFieldsPulledFromFieldData() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("string_field").field("type", "string").endObject()
+ .startObject("byte_field").field("type", "byte").endObject()
+ .startObject("short_field").field("type", "short").endObject()
+ .startObject("integer_field").field("type", "integer").endObject()
+ .startObject("long_field").field("type", "long").endObject()
+ .startObject("float_field").field("type", "float").endObject()
+ .startObject("double_field").field("type", "double").endObject()
+ .startObject("date_field").field("type", "date").endObject()
+ .startObject("boolean_field").field("type", "boolean").endObject()
+ .startObject("binary_field").field("type", "binary").endObject()
+ .endObject().endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("string_field", "foo")
+ .field("byte_field", (byte) 1)
+ .field("short_field", (short) 2)
+ .field("integer_field", 3)
+ .field("long_field", 4l)
+ .field("float_field", 5.0f)
+ .field("double_field", 6.0d)
+ .field("date_field", Joda.forPattern("dateOptionalTime").printer().print(new DateTime(2012, 3, 22, 0, 0, DateTimeZone.UTC)))
+ .field("boolean_field", true)
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchRequestBuilder builder = client().prepareSearch().setQuery(matchAllQuery())
+ .addFieldDataField("string_field")
+ .addFieldDataField("byte_field")
+ .addFieldDataField("short_field")
+ .addFieldDataField("integer_field")
+ .addFieldDataField("long_field")
+ .addFieldDataField("float_field")
+ .addFieldDataField("double_field")
+ .addFieldDataField("date_field")
+ .addFieldDataField("boolean_field");
+ SearchResponse searchResponse = builder.execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(9));
+
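+        // Field data normalizes values: integral types come back as longs, floats as doubles,
+        // dates as epoch millis, and booleans as "T"/"F", as the assertions below show.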
+ assertThat(searchResponse.getHits().getAt(0).fields().get("byte_field").value().toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("short_field").value().toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("integer_field").value(), equalTo((Object) 3l));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("long_field").value(), equalTo((Object) 4l));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("float_field").value(), equalTo((Object) 5.0));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("double_field").value(), equalTo((Object) 6.0d));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("date_field").value(), equalTo((Object) 1332374400000L));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value().toString(), equalTo("T"));
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java b/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java
new file mode 100644
index 0000000..a607f30
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java
@@ -0,0 +1,715 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.functionscore;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.index.query.MatchAllFilterBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionBuilder;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.client.Requests.indexRequest;
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.*;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+public class DecayFunctionScoreTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testDistanceScoreGeoLinGaussExp() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("loc").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("1")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 10).field("lon", 20).endObject()
+ .endObject()));
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("2")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11).field("lon", 22).endObject()
+ .endObject()));
+
+ int numDummyDocs = 20;
+ for (int i = 1; i <= numDummyDocs; i++) {
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId(Integer.toString(i + 3))
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11 + i).field("lon", 22 + i)
+ .endObject().endObject()));
+ }
+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);
+
+ indexRandom(false, builders);
+ refresh();
+
+ // Test Gauss
+ List<Float> lonlat = new ArrayList<Float>();
+ lonlat.add(new Float(20));
+ lonlat.add(new Float(11));
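+        // In list/array form the coordinates are [lon, lat] (GeoJSON order), so the query point
+        // is lon 20 / lat 11: closest to doc 1 (lat 10, lon 20), then doc 2 (lat 11, lon 22).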
+
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).query(termQuery("test", "value"))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+        // Test Lin
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).query(termQuery("test", "value"))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), linearDecayFunction("loc", lonlat, "1000km")))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+        // Test Exp
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).query(termQuery("test", "value"))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), exponentialDecayFunction("loc", lonlat, "1000km")))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+ }
+
+ @Test
+ public void testDistanceScoreGeoLinGaussExpWithOffset() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "double").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+        // add two docs within the offset range
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+ indexBuilders.add(client().prepareIndex().setType("type1").setId("1").setIndex("test")
+ .setSource(jsonBuilder().startObject().field("test", "value").field("num", 0.5).endObject()));
+ indexBuilders.add(client().prepareIndex().setType("type1").setId("2").setIndex("test")
+ .setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.7).endObject()));
+
+ // add docs outside offset
+ int numDummyDocs = 20;
+ for (int i = 0; i < numDummyDocs; i++) {
+ indexBuilders.add(client().prepareIndex().setType("type1").setId(Integer.toString(i + 3)).setIndex("test")
+ .setSource(jsonBuilder().startObject().field("test", "value").field("num", 3.0 + i).endObject()));
+ }
+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);
+
+ indexRandom(false, builders);
+ refresh();
+
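+        // origin 1.0 with offset 1.0 means no decay is applied inside [0.0, 2.0]: docs 1
+        // (num 0.5) and 2 (num 1.7) keep the full function score, tie, and rank ahead of the
+        // dummy docs at num >= 3.0.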
+ // Test Gauss
+
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .explain(true)
+ .size(numDummyDocs + 2)
+ .query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 1.0, 5.0).setOffset(1.0))
+ .boostMode(CombineFunction.REPLACE.getName()))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+ assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).score(), equalTo(sh.getAt(0).score()));
+ for (int i = 0; i < numDummyDocs; i++) {
+ assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3)));
+ }
+
+ // Test Exp
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .explain(true)
+ .size(numDummyDocs + 2)
+ .query(functionScoreQuery(termQuery("test", "value"),
+ exponentialDecayFunction("num", 1.0, 5.0).setOffset(1.0)).boostMode(
+ CombineFunction.REPLACE.getName()))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+ assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).score(), equalTo(sh.getAt(0).score()));
+ for (int i = 0; i < numDummyDocs; i++) {
+ assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3)));
+ }
+ // Test Lin
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .explain(true)
+ .size(numDummyDocs + 2)
+ .query(functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 20.0).setOffset(1.0))
+ .boostMode(CombineFunction.REPLACE.getName()))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+ assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).score(), equalTo(sh.getAt(0).score()));
+ }
+
+ @Test
+ public void testBoostModeSettingWorks() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("loc").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("1")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11).field("lon", 21).endObject()
+ .endObject()));
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("2")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value value").startObject("loc").field("lat", 11).field("lon", 20)
+ .endObject().endObject()));
+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);
+
+ indexRandom(false, builders);
+ refresh();
+
+        // Gauss decay with boost mode MULT: the text score still contributes, so doc 1 wins
+        // despite being farther from the query point
+ List<Float> lonlat = new ArrayList<Float>();
+ lonlat.add(new Float(20));
+ lonlat.add(new Float(11));
+
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode(
+ CombineFunction.MULT.getName()))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (2)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+
+        // Same gauss decay with boost mode REPLACE: only the decay function counts, so the
+        // geographically closer doc 2 wins
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode(
+ CombineFunction.REPLACE.getName()))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (2)));
+ assertThat(sh.getAt(0).getId(), equalTo("2"));
+ assertThat(sh.getAt(1).getId(), equalTo("1"));
+
+ }
+
+ @Test
+ public void testParseGeoPoint() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("loc").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("1")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 20).field("lon", 11).endObject()
+ .endObject()));
+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);
+
+ indexRandom(false, builders);
+ refresh();
+ GeoPoint point = new GeoPoint(20, 11);
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", point, "1000km")).boostMode(
+ CombineFunction.MULT.getName()))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5));
+ float[] coords = { 11, 20 };
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", coords, "1000km")).boostMode(
+ CombineFunction.MULT.getName()))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5));
+ }
+
+ @Test
+ public void testCombineModes() throws Exception {
+
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "double").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+ indexBuilders.add(client().prepareIndex().setType("type1").setId("1").setIndex("test")
+ .setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()));
+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);
+
+ indexRandom(false, builders);
+ refresh();
+
+        // with origin 0.0, scale 1.0 and decay 0.5, the gauss function scores num = 1.0 at exactly 0.5
+
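+        // The doc's match score is ~0.30685282 (Lucene TF-IDF) and the query boost is 2.0.
+        // Assuming, as the assertions below imply, that the boost multiplies the combined score:
+        //   MULT:    2.0 * 0.30685282 * 0.5        = 0.30685282
+        //   REPLACE: 2.0 * 0.5                     = 1.0
+        //   SUM:     2.0 * (0.30685282 + 0.5)
+        //   AVG:     2.0 * (0.30685282 + 0.5) / 2  = 0.30685282 + 0.5
+        //   MIN:     2.0 * min(0.30685282, 0.5)    = 2.0 * 0.30685282
+        //   MAX:     2.0 * max(0.30685282, 0.5)    = 1.0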
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.MULT))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5));
+ logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.REPLACE))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(1.0, 1.e-5));
+ logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.SUM))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(2.0 * (0.30685282 + 0.5), 1.e-5));
+ logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.AVG))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo((0.30685282 + 0.5), 1.e-5));
+ logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.MIN))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(2.0 * (0.30685282), 1.e-5));
+ logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.MAX))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(1.0, 1.e-5));
+ logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());
+
+ }
+
+ @Test(expected = SearchPhaseExecutionException.class)
+ public void testExceptionThrownIfScaleLE0() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num1").field("type", "date").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-28").endObject())).actionGet();
+ refresh();
+
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "2013-05-28", "-1d")))));
+
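+        // actionGet() is expected to throw SearchPhaseExecutionException (scale "-1d" is <= 0),
+        // so the assertion below should never be reached.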
+ SearchResponse sr = response.actionGet();
+ assertOrderedSearchHits(sr, "2", "1");
+ }
+
+ @Test
+ public void testParseDateMath() throws Exception {
+
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num1").field("type", "date").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", System.currentTimeMillis()).endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", System.currentTimeMillis() - (1000 * 60 * 60 * 24)).endObject())).actionGet();
+ refresh();
+
+ SearchResponse sr = client().search(
+ searchRequest().source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now", "2d"))))).get();
+
+ assertNoFailures(sr);
+ assertOrderedSearchHits(sr, "1", "2");
+
+ sr = client().search(
+ searchRequest().source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now-1d", "2d"))))).get();
+
+ assertNoFailures(sr);
+ assertOrderedSearchHits(sr, "2", "1");
+
+ }
+
+    @Test(expected = ElasticsearchIllegalStateException.class)
+    public void testExceptionThrownIfScaleRefNotBetween0And1() throws Exception {
+        // decay must lie strictly between 0 and 1, so setDecay(100) is expected to throw
+        DecayFunctionBuilder gfb = new GaussDecayFunctionBuilder("num1", "2013-05-28", "1d").setDecay(100);
+    }
+
+ @Test
+ public void testValueMissingLin() throws Exception {
+
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num1").field("type", "date").endObject().startObject("num2").field("type", "double")
+ .endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test")
+ .type("type1")
+ .id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").field("num2", "1.0")
+ .endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num2", "1.0").endObject())).actionGet();
+ client().index(
+ indexRequest("test")
+ .type("type1")
+ .id("3")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-30").field("num2", "1.0")
+ .endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("4")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-30").endObject())).actionGet();
+
+ refresh();
+
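+        // Docs 2 and 4 each lack one of the two fields. The assertions below expect them to
+        // outscore their complete counterparts, i.e. a missing value presumably contributes no
+        // decay to the combined score.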
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).query(
+ functionScoreQuery(termQuery("test", "value")).add(linearDecayFunction("num1", "2013-05-28", "+3d"))
+ .add(linearDecayFunction("num2", "0.0", "1")).scoreMode("multiply"))));
+
+ SearchResponse sr = response.actionGet();
+ assertNoFailures(sr);
+ SearchHits sh = sr.getHits();
+ assertThat(sh.hits().length, equalTo(4));
+ double[] scores = new double[4];
+ for (int i = 0; i < sh.hits().length; i++) {
+ scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore();
+ }
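+ // docs 2 and 4 each miss one of the decayed fields; a missing value is not
+ // penalized by the corresponding decay function, so each should outscore its
+ // fully-populated counterpart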
+ assertThat(scores[0], lessThan(scores[1]));
+ assertThat(scores[2], lessThan(scores[3]));
+
+ }
+
+ @Test
+ public void testDateWithoutOrigin() throws Exception {
+ DateTime dt = new DateTime();
+
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num1").field("type", "date").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ DateTime docDate = dt.minusDays(1);
+ String docDateString = docDate.getYear() + "-" + docDate.getMonthOfYear() + "-" + docDate.getDayOfMonth();
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject())).actionGet();
+ docDate = dt.minusDays(2);
+ docDateString = docDate.getYear() + "-" + docDate.getMonthOfYear() + "-" + docDate.getDayOfMonth();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject())).actionGet();
+ docDate = dt.minusDays(3);
+ docDateString = docDate.getYear() + "-" + docDate.getMonthOfYear() + "-" + docDate.getDayOfMonth();
+ client().index(
+ indexRequest("test").type("type1").id("3")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject())).actionGet();
+
+ refresh();
+
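+ // no origin is given to the decay functions below, so for date fields the
+ // origin should default to "now"; the most recent document then scores highest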
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).query(
+ functionScoreQuery(QueryBuilders.matchAllQuery()).add(linearDecayFunction("num1", "1000w"))
+ .add(gaussDecayFunction("num1", "1d")).add(exponentialDecayFunction("num1", "1000w"))
+ .scoreMode("multiply"))));
+
+ SearchResponse sr = response.actionGet();
+ assertNoFailures(sr);
+ SearchHits sh = sr.getHits();
+ assertThat(sh.hits().length, equalTo(3));
+ double[] scores = new double[3];
+ for (int i = 0; i < sh.hits().length; i++) {
+ scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore();
+ }
+ assertThat(scores[1], lessThan(scores[0]));
+ assertThat(scores[2], lessThan(scores[1]));
+
+ }
+
+ @Test
+ public void testManyDocsLin() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type",
+ jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("date").field("type", "date").endObject().startObject("num").field("type", "double")
+ .endObject().startObject("geo").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ int numDocs = 200;
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();
+
+ for (int i = 0; i < numDocs; i++) {
+ double lat = 100 + (int) (10.0 * (float) (i) / (float) (numDocs));
+ double lon = 100;
+ int day = (int) (29.0 * (float) (i) / (float) (numDocs)) + 1;
+ String dayString = day < 10 ? "0" + Integer.toString(day) : Integer.toString(day);
+ String date = "2013-05-" + dayString;
+
+ indexBuilders.add(client().prepareIndex()
+ .setType("type")
+ .setId(Integer.toString(i))
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").field("date", date).field("num", i).startObject("geo")
+ .field("lat", lat).field("lon", lon).endObject().endObject()));
+ }
+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);
+ indexRandom(true, builders);
+ List<Float> lonlat = new ArrayList<Float>();
+ lonlat.add(100f);
+ lonlat.add(110f);
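+ // note: coordinate lists are read as [lon, lat], so the origin is lon=100,
+ // lat=110, the end of the indexed range that the highest-numbered docs sit
+ // closest to; the date, geo and num decays therefore all rank scores ascending by id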
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().size(numDocs).query(
+ functionScoreQuery(termQuery("test", "value"))
+ .add(new MatchAllFilterBuilder(), linearDecayFunction("date", "2013-05-30", "+15d"))
+ .add(new MatchAllFilterBuilder(), linearDecayFunction("geo", lonlat, "1000km"))
+ .add(new MatchAllFilterBuilder(), linearDecayFunction("num", numDocs, numDocs / 2.0))
+ .scoreMode("multiply").boostMode(CombineFunction.REPLACE.getName()))));
+
+ SearchResponse sr = response.actionGet();
+ assertNoFailures(sr);
+ SearchHits sh = sr.getHits();
+ assertThat(sh.hits().length, equalTo(numDocs));
+ double[] scores = new double[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ scores[Integer.parseInt(sh.getAt(i).getId())] = sh.getAt(i).getScore();
+ }
+ for (int i = 0; i < numDocs - 1; i++) {
+ assertThat(scores[i], lessThan(scores[i + 1]));
+
+ }
+
+ }
+
+ @Test(expected = SearchPhaseExecutionException.class)
+ public void testParsingExceptionIfFieldDoesNotExist() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type",
+ jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("geo").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ int numDocs = 2;
+ client().index(
+ indexRequest("test").type("type1").source(
+ jsonBuilder().startObject().field("test", "value").startObject("geo").field("lat", 1).field("lon", 2).endObject()
+ .endObject())).actionGet();
+ refresh();
+ List<Float> lonlat = new ArrayList<Float>();
+ lonlat.add(100f);
+ lonlat.add(110f);
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .explain(true)
+ .size(numDocs)
+ .query(functionScoreQuery(termQuery("test", "value")).add(new MatchAllFilterBuilder(),
+ linearDecayFunction("type1.geo", lonlat, "1000km")).scoreMode("multiply"))));
+ SearchResponse sr = response.actionGet();
+
+ }
+
+ @Test(expected = SearchPhaseExecutionException.class)
+ public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type",
+ jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "string").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test").type("type").source(
+ jsonBuilder().startObject().field("test", "value").field("num", Integer.toString(1)).endObject())).actionGet();
+ refresh();
+ // so, we indexed a string field, but now we try to score a num field
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value")).add(new MatchAllFilterBuilder(),
+ linearDecayFunction("num", 1.0, 0.5)).scoreMode("multiply"))));
+ response.actionGet();
+ }
+
+ @Test
+ public void testNoQueryGiven() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type",
+ jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "double").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test").type("type").source(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()))
+ .actionGet();
+ refresh();
+ // no query is given, so the function score query should default to match_all
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery().add(new MatchAllFilterBuilder(), linearDecayFunction("num", 1, 0.5)).scoreMode(
+ "multiply"))));
+ response.actionGet();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java b/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java
new file mode 100644
index 0000000..024cc04
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.functionscore;
+
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.functionscore.DecayFunction;
+import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.DecayFunctionParser;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.indexRequest;
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.SUITE, numNodes = 1)
+public class FunctionScorePluginTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put("plugin.types", CustomDistanceScorePlugin.class.getName())
+ .put(super.nodeSettings(nodeOrdinal))
+ .build();
+ }
+
+ @Test
+ public void testPlugin() throws Exception {
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test")
+ .field("type", "string").endObject().startObject("num1").field("type", "date").endObject().endObject()
+ .endObject().endObject()).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-26").endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").endObject())).actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ DecayFunctionBuilder gfb = new CustomDistanceScoreBuilder("num1", "2013-05-28", "+1d");
+
+ ActionFuture<SearchResponse> response = client().search(searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).query(functionScoreQuery(termQuery("test", "value")).add(gfb))));
+
+ SearchResponse sr = response.actionGet();
+ ElasticsearchAssertions.assertNoFailures(sr);
+ SearchHits sh = sr.getHits();
+
+ assertThat(sh.hits().length, equalTo(2));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+
+ }
+
+ public static class CustomDistanceScorePlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test-plugin-distance-score";
+ }
+
+ @Override
+ public String description() {
+ return "Distance score plugin to test pluggable implementation";
+ }
+
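+ // registering the parser below is what makes "linear_mult" resolvable inside
+ // function_score queries on nodes running this plugin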
+ public void onModule(FunctionScoreModule scoreModule) {
+ scoreModule.registerParser(FunctionScorePluginTests.CustomDistanceScoreParser.class);
+ }
+
+ }
+
+ public static class CustomDistanceScoreParser extends DecayFunctionParser {
+
+ public static final String[] NAMES = { "linear_mult", "linearMult" };
+
+ @Override
+ public String[] getNames() {
+ return NAMES;
+ }
+
+ static final DecayFunction decayFunction = new LinearMultScoreFunction();
+
+ @Override
+ public DecayFunction getDecayFunction() {
+ return decayFunction;
+ }
+
+ static class LinearMultScoreFunction implements DecayFunction {
+ LinearMultScoreFunction() {
+ }
+
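+ // unlike the built-in decay functions, this test function returns the raw
+ // distance, so documents farther from the origin score higher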
+ @Override
+ public double evaluate(double value, double scale) {
+
+ return value;
+ }
+
+ @Override
+ public Explanation explainFunction(String distanceString, double distanceVal, double scale) {
+ ComplexExplanation ce = new ComplexExplanation();
+ ce.setDescription("" + distanceVal);
+ return ce;
+ }
+
+ @Override
+ public double processScale(double userGivenScale, double userGivenValue) {
+ return userGivenScale;
+ }
+ }
+ }
+
+ public class CustomDistanceScoreBuilder extends DecayFunctionBuilder {
+
+ public CustomDistanceScoreBuilder(String fieldName, Object origin, Object scale) {
+ super(fieldName, origin, scale);
+ }
+
+ @Override
+ public String getName() {
+ return CustomDistanceScoreParser.NAMES[0];
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionTests.java b/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionTests.java
new file mode 100644
index 0000000..7a19796
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionTests.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.functionscore;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.CoreMatchers;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.randomFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class RandomScoreFunctionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void consistentHitsWithSameSeed() throws Exception {
+ final int replicas = between(0, 2); // needed for green status!
+ cluster().ensureAtLeastNumNodes(replicas + 1);
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(
+ ImmutableSettings.builder().put("index.number_of_shards", between(2, 5))
+ .put("index.number_of_replicas", replicas)
+ .build()));
+ ensureGreen(); // wait until shards have settled, otherwise the preference-based routing below could change between requests
+ int docCount = atLeast(100);
+ for (int i = 0; i < docCount; i++) {
+ index("test", "type", "" + i, jsonBuilder().startObject().endObject());
+ }
+ flush();
+ refresh();
+ int outerIters = atLeast(10);
+ for (int o = 0; o < outerIters; o++) {
+ final long seed = randomLong();
+ final String preference = randomRealisticUnicodeOfLengthBetween(1, 10); // preference must be at least one character
+ int innerIters = atLeast(2);
+ SearchHits hits = null;
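+ // the same seed combined with the same preference string must hit the same
+ // shard copies, so every inner iteration should reproduce the scores and
+ // ordering captured in the first one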
+ for (int i = 0; i < innerIters; i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setPreference(preference)
+ .setQuery(functionScoreQuery(matchAllQuery(), randomFunction(seed)))
+ .execute().actionGet();
+ assertThat("Failures " + Arrays.toString(searchResponse.getShardFailures()), searchResponse.getShardFailures().length, CoreMatchers.equalTo(0));
+ int hitCount = searchResponse.getHits().getHits().length;
+ if (i == 0) {
+ assertThat(hits, nullValue());
+ hits = searchResponse.getHits();
+ } else {
+ assertThat(hits.getHits().length, equalTo(searchResponse.getHits().getHits().length));
+ for (int j = 0; j < hitCount; j++) {
+ assertThat(searchResponse.getHits().getAt(j).score(), equalTo(hits.getAt(j).score()));
+ assertThat(searchResponse.getHits().getAt(j).id(), equalTo(hits.getAt(j).id()));
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ @Ignore
+ public void distribution() throws Exception {
+ int count = 10000;
+
+ prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ for (int i = 0; i < count; i++) {
+ index("test", "type", "" + i, jsonBuilder().startObject().endObject());
+ }
+
+ flush();
+ refresh();
+
+ int[] matrix = new int[count];
+
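+ // matrix[i] counts how often document i came back as the top hit across
+ // `count` searches, each seeded with a fresh nanoTime value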
+ for (int i = 0; i < count; i++) {
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(functionScoreQuery(matchAllQuery(), randomFunction(System.nanoTime())))
+ .execute().actionGet();
+
+ matrix[Integer.valueOf(searchResponse.getHits().getAt(0).id())]++;
+ }
+
+ int filled = 0;
+ int maxRepeat = 0;
+ int sumRepeat = 0;
+ for (int i = 0; i < matrix.length; i++) {
+ int value = matrix[i];
+ sumRepeat += value;
+ maxRepeat = Math.max(maxRepeat, value);
+ if (value > 0) {
+ filled++;
+ }
+ }
+
+ System.out.println();
+ System.out.println("max repeat: " + maxRepeat);
+ System.out.println("avg repeat: " + sumRepeat / (double) filled);
+ System.out.println("distribution: " + filled / (double) count);
+
+ int percentile50 = filled / 2;
+ int percentile25 = (filled / 4);
+ int percentile75 = percentile50 + percentile25;
+
+ int sum = 0;
+
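+ // walk the filled buckets in doc-id order, printing the bucket index at which
+ // each percentile counter runs out (a rough positional percentile)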
+ for (int i = 0; i < matrix.length; i++) {
+ if (matrix[i] == 0) {
+ continue;
+ }
+ sum += i * matrix[i];
+ if (percentile50 == 0) {
+ System.out.println("median: " + i);
+ } else if (percentile25 == 0) {
+ System.out.println("percentile_25: " + i);
+ } else if (percentile75 == 0) {
+ System.out.println("percentile_75: " + i);
+ }
+ percentile50--;
+ percentile25--;
+ percentile75--;
+ }
+
+ System.out.println("mean: " + sum / (double) count);
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxTests.java b/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxTests.java
new file mode 100644
index 0000000..ca6ddbc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxTests.java
@@ -0,0 +1,280 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.geoBoundingBoxFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class GeoBoundingBoxTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleBoundingBoxTest() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "New York")
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 5.286 km
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("name", "Times Square")
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 0.4621 km
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("name", "Tribeca")
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 1.055 km
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .field("name", "Wall Street")
+ .startObject("location").field("lat", 40.7051157).field("lon", -74.0088305).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 1.258 km
+ client().prepareIndex("test", "type1", "5").setSource(jsonBuilder().startObject()
+ .field("name", "Soho")
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 2.029 km
+ client().prepareIndex("test", "type1", "6").setSource(jsonBuilder().startObject()
+ .field("name", "Greenwich Village")
+ .startObject("location").field("lat", 40.731033).field("lon", -73.9962255).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 8.572 km
+ client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject()
+ .field("name", "Brooklyn")
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
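+ // the box spans lat 40.717..40.73 and lon -74.1..-73.99: Tribeca (3) and
+ // Soho (5) fall inside, while New York itself (1) sits just south of the
+ // bottom edge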
+ SearchResponse searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(40.73, -74.1).bottomRight(40.717, -73.99)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("5")));
+ }
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(40.73, -74.1).bottomRight(40.717, -73.99).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("5")));
+ }
+ }
+
+ @Test
+ public void limitsBoundingBoxTest() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).setSettings(settingsBuilder().put("index.number_of_shards", "1")).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 40).field("lon", -20).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 40).field("lon", -10).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 40).field("lon", 10).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 40).field("lon", 20).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "5").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 10).field("lon", -170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "6").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 0).field("lon", -170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", -10).field("lon", -170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "8").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 10).field("lon", 170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "9").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 0).field("lon", 170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "10").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", -10).field("lon", 170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
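+ // the first two boxes are ordinary; the last two have a left edge east of the
+ // right edge and therefore cross the +/-180 degree meridian, which is the
+ // limit case this test is about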
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(41, -11).bottomRight(40, 9)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(41, -11).bottomRight(40, 9).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(41, -9).bottomRight(40, 11)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(41, -9).bottomRight(40, 11).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(11, 171).bottomRight(1, -169)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("5"));
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(11, 171).bottomRight(1, -169).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("5"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(9, 169).bottomRight(-1, -171)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("9"));
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxFilter("location").topLeft(9, 169).bottomRight(-1, -171).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("9"));
+ }
+
+ @Test
+ public void limit2BoundingBoxTest() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).setSettings(settingsBuilder().put("index.number_of_shards", "1")).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("userid", 880)
+ .field("title", "Place in Stockholm")
+ .startObject("location").field("lat", 59.328355000000002).field("lon", 18.036842).endObject()
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("userid", 534)
+ .field("title", "Place in Montreal")
+ .startObject("location").field("lat", 45.509526999999999).field("lon", -73.570986000000005).endObject()
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
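+ // the left edge (143.5) lies east of the right edge (113.96875), so the box
+ // wraps across the dateline and contains both Stockholm and Montreal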
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(termQuery("userid", 880),
+ geoBoundingBoxFilter("location").topLeft(74.579421999999994, 143.5).bottomRight(-66.668903999999998, 113.96875))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(termQuery("userid", 880),
+ geoBoundingBoxFilter("location").topLeft(74.579421999999994, 143.5).bottomRight(-66.668903999999998, 113.96875).type("indexed"))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(termQuery("userid", 534),
+ geoBoundingBoxFilter("location").topLeft(74.579421999999994, 143.5).bottomRight(-66.668903999999998, 113.96875))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(termQuery("userid", 534),
+ geoBoundingBoxFilter("location").topLeft(74.579421999999994, 143.5).bottomRight(-66.668903999999998, 113.96875).type("indexed"))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+}
+
diff --git a/src/test/java/org/elasticsearch/search/geo/GeoDistanceFacetTests.java b/src/test/java/org/elasticsearch/search/geo/GeoDistanceFacetTests.java
new file mode 100644
index 0000000..18237fb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/geo/GeoDistanceFacetTests.java
@@ -0,0 +1,261 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.facet.geodistance.GeoDistanceFacet;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.facet.FacetBuilders.geoDistanceFacet;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class GeoDistanceFacetTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleGeoFacetTests() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ // to NY: 0
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "New York")
+ .field("num", 1)
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 5.286 km
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("name", "Times Square")
+ .field("num", 2)
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 0.4621 km
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("name", "Tribeca")
+ .field("num", 3)
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 1.055 km
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .field("name", "Wall Street")
+ .field("num", 4)
+ .startObject("location").field("lat", 40.7051157).field("lon", -74.0088305).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 1.258 km
+ client().prepareIndex("test", "type1", "5").setSource(jsonBuilder().startObject()
+ .field("name", "Soho")
+ .field("num", 5)
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 2.029 km
+ client().prepareIndex("test", "type1", "6").setSource(jsonBuilder().startObject()
+ .field("name", "Greenwich Village")
+ .field("num", 6)
+ .startObject("location").field("lat", 40.731033).field("lon", -73.9962255).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 8.572 km
+ client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject()
+ .field("name", "Brooklyn")
+ .field("num", 7)
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
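+ // distances from the NY origin: 1 -> 0km, 3 -> 0.46km, 4 -> 1.06km,
+ // 5 -> 1.26km, 6 -> 2.03km, 2 -> 5.29km, 7 -> 8.57km; the ranges below
+ // overlap on purpose, so one document can be counted in several entries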
+ SearchResponse searchResponse = client().prepareSearch() // from NY
+ .setQuery(matchAllQuery())
+ .addFacet(geoDistanceFacet("geo1").field("location").point(40.7143528, -74.0059731).unit(DistanceUnit.KILOMETERS)
+ .addUnboundedFrom(2)
+ .addRange(0, 1)
+ .addRange(0.5, 2.5)
+ .addUnboundedTo(1)
+ )
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(7l));
+ GeoDistanceFacet facet = searchResponse.getFacets().facet("geo1");
+ assertThat(facet.getEntries().size(), equalTo(4));
+
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(2, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(4l));
+ assertThat(facet.getEntries().get(0).getTotal(), not(closeTo(0, 0.00001)));
+
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(0, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), not(closeTo(0, 0.00001)));
+
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(0.5, 0.000001));
+ assertThat(facet.getEntries().get(2).getTo(), closeTo(2.5, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(2).getTotal(), not(closeTo(0, 0.00001)));
+
+ assertThat(facet.getEntries().get(3).getFrom(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(3).getCount(), equalTo(5l));
+ assertThat(facet.getEntries().get(3).getTotal(), not(closeTo(0, 0.00001)));
+
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(matchAllQuery())
+ .addFacet(geoDistanceFacet("geo1").field("location").point(40.7143528, -74.0059731).unit(DistanceUnit.KILOMETERS).valueField("num")
+ .addUnboundedFrom(2)
+ .addRange(0, 1)
+ .addRange(0.5, 2.5)
+ .addUnboundedTo(1)
+ )
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(7l));
+ facet = searchResponse.getFacets().facet("geo1");
+ assertThat(facet.getEntries().size(), equalTo(4));
+
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(2, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(4l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(13, 0.00001));
+
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(0, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(4, 0.00001));
+
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(0.5, 0.000001));
+ assertThat(facet.getEntries().get(2).getTo(), closeTo(2.5, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(15, 0.00001));
+
+ assertThat(facet.getEntries().get(3).getFrom(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(3).getCount(), equalTo(5l));
+ assertThat(facet.getEntries().get(3).getTotal(), closeTo(24, 0.00001));
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(matchAllQuery())
+ .addFacet(geoDistanceFacet("geo1").field("location").point(40.7143528, -74.0059731).unit(DistanceUnit.KILOMETERS).valueScript("doc['num'].value")
+ .addUnboundedFrom(2)
+ .addRange(0, 1)
+ .addRange(0.5, 2.5)
+ .addUnboundedTo(1)
+ )
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(7l));
+ facet = searchResponse.getFacets().facet("geo1");
+ assertThat(facet.getEntries().size(), equalTo(4));
+
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(2, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(4l));
+ assertThat(facet.getEntries().get(0).getTotal(), closeTo(13, 0.00001));
+
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(0, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(2l));
+ assertThat(facet.getEntries().get(1).getTotal(), closeTo(4, 0.00001));
+
+ assertThat(facet.getEntries().get(2).getFrom(), closeTo(0.5, 0.000001));
+ assertThat(facet.getEntries().get(2).getTo(), closeTo(2.5, 0.000001));
+ assertThat(facet.getEntries().get(2).getCount(), equalTo(3l));
+ assertThat(facet.getEntries().get(2).getTotal(), closeTo(15, 0.00001));
+
+ assertThat(facet.getEntries().get(3).getFrom(), closeTo(1, 0.000001));
+ assertThat(facet.getEntries().get(3).getCount(), equalTo(5l));
+ assertThat(facet.getEntries().get(3).getTotal(), closeTo(24, 0.00001));
+ }
+
+ @Test
+ public void multiLocationGeoDistanceTest() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("num", 1)
+ .startArray("location")
+ // to NY: 0
+ .startObject().field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ // to NY: 5.286 km
+ .startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("num", 3)
+ .startArray("location")
+ // to NY: 0.4621 km
+ .startObject().field("lat", 40.718266).field("lon", -74.007819).endObject()
+ // to NY: 1.055 km
+ .startObject().field("lat", 40.7051157).field("lon", -74.0088305).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
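+ // with multi-valued geo points each value falls into its matching range: doc 1
+ // (0km and 5.3km) contributes to both ranges, doc 3 (0.46km and 1.06km) only
+ // to the 0-2km range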
+ SearchResponse searchResponse = client().prepareSearch() // from NY
+ .setQuery(matchAllQuery())
+ .addFacet(geoDistanceFacet("geo1").field("location").point(40.7143528, -74.0059731).unit(DistanceUnit.KILOMETERS)
+ .addRange(0, 2)
+ .addRange(2, 10)
+ )
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ GeoDistanceFacet facet = searchResponse.getFacets().facet("geo1");
+ assertThat(facet.getEntries().size(), equalTo(2));
+
+ assertThat(facet.getEntries().get(0).getFrom(), closeTo(0, 0.000001));
+ assertThat(facet.getEntries().get(0).getTo(), closeTo(2, 0.000001));
+ assertThat(facet.getEntries().get(0).getCount(), equalTo(2l));
+
+ assertThat(facet.getEntries().get(1).getFrom(), closeTo(2, 0.000001));
+ assertThat(facet.getEntries().get(1).getTo(), closeTo(10, 0.000001));
+ assertThat(facet.getEntries().get(1).getCount(), equalTo(1l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/geo/GeoDistanceTests.java b/src/test/java/org/elasticsearch/search/geo/GeoDistanceTests.java
new file mode 100644
index 0000000..f82f8c4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/geo/GeoDistanceTests.java
@@ -0,0 +1,659 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class GeoDistanceTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleDistanceTests() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true)
+ .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "New York")
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()),
+ // to NY: 5.286 km
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("name", "Times Square")
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ .endObject()),
+ // to NY: 0.4621 km
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("name", "Tribeca")
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endObject()),
+ // to NY: 1.055 km
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .field("name", "Wall Street")
+ .startObject("location").field("lat", 40.7051157).field("lon", -74.0088305).endObject()
+ .endObject()),
+ // to NY: 1.258 km
+ client().prepareIndex("test", "type1", "5").setSource(jsonBuilder().startObject()
+ .field("name", "Soho")
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject()
+ .endObject()),
+ // to NY: 2.029 km
+ client().prepareIndex("test", "type1", "6").setSource(jsonBuilder().startObject()
+ .field("name", "Greenwich Village")
+ .startObject("location").field("lat", 40.731033).field("lon", -73.9962255).endObject()
+ .endObject()),
+ // to NY: 8.572 km
+ client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject()
+ .field("name", "Brooklyn")
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
+ .endObject()));
+
+ SearchResponse searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("3km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 5);
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
+ }
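+ // optimizeBbox("indexed") pre-filters with a bounding box built from the
+ // indexed lat/lon sub-fields before the exact distance check; the hits must
+ // match the unoptimized filter above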
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("3km").point(40.7143528, -74.0059731).optimizeBbox("indexed")))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 5);
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
+ }
+
+ // now with a PLANE type
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("3km").geoDistance(GeoDistance.PLANE).point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 5);
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
+ }
+
+ // the FACTOR geo distance type is too imprecise at this resolution, so it is not exercised here
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("2km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
+ }
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("2km").point(40.7143528, -74.0059731).optimizeBbox("indexed")))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
+ }
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("1.242mi").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
+ }
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location").distance("1.242mi").point(40.7143528, -74.0059731).optimizeBbox("indexed")))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
+ }
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceRangeFilter("location").from("1.0km").to("2.0km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 2);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("4"), equalTo("5")));
+ }
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceRangeFilter("location").from("1.0km").to("2.0km").point(40.7143528, -74.0059731).optimizeBbox("indexed")))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 2);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("4"), equalTo("5")));
+ }
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceRangeFilter("location").to("2.0km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceRangeFilter("location").from("2.0km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ // SORTING
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("location").point(40.7143528, -74.0059731).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 7);
+ assertOrderedSearchHits(searchResponse, "1", "3", "4", "5", "6", "2", "7");
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("location").point(40.7143528, -74.0059731).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 7);
+ assertOrderedSearchHits(searchResponse, "7", "2", "6", "5", "4", "3", "1");
+ }
+
+ @Test
+ public void testDistanceSortingMVFields() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("locations").field("type", "geo_point").field("lat_lon", true)
+ .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject().endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", mapping)
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("names", "New York")
+ .startObject("locations").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("names", "Times Square", "Tribeca")
+ .startArray("locations")
+ // to NY: 5.286 km
+ .startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ // to NY: 0.4621 km
+ .startObject().field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("names", "Wall Street", "Soho")
+ .startArray("locations")
+ // to NY: 1.055 km
+ .startObject().field("lat", 40.7051157).field("lon", -74.0088305).endObject()
+ // to NY: 1.258 km
+ .startObject().field("lat", 40.7247222).field("lon", -74).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .field("names", "Greenwich Village", "Brooklyn")
+ .startArray("locations")
+ // to NY: 2.029 km
+ .startObject().field("lat", 40.731033).field("lon", -73.9962255).endObject()
+ // to NY: 8.572 km
+ .startObject().field("lat", 40.65).field("lon", -73.95).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
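+ // With multi-valued geo points the default sort mode is MIN for ascending and
+ // MAX for descending order, as the expected distances below reflect.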
+ // Order: Asc
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "2", "3", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+
+ // Order: Asc, Mode: max
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max"))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+
+ // Order: Desc
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ // Order: Desc, Mode: min
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min"))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "3", "2", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1157d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(2874d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(5301d, 10d));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
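+ // Sort mode "sum" is not supported for geo distance sorting and must be
+ // rejected with a BAD_REQUEST shard failure.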
+ try {
+ client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("sum"))
+ .execute().actionGet();
+ fail("Expected error");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+ }
+
+ @Test
+ // Regression bug: https://github.com/elasticsearch/elasticsearch/issues/2851
+ public void testDistanceSortingWithMissingGeoPoint() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("locations").field("type", "geo_point").field("lat_lon", true)
+ .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject().endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", mapping)
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("names", "Times Square", "Tribeca")
+ .startArray("locations")
+ // to NY: 5.286 km
+ .startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ // to NY: 0.4621 km
+ .startObject().field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("names", "Wall Street", "Soho")
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
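+ // Documents without a geo point sort with distance Double.MAX_VALUE, so they
+ // come last in ascending and first in descending order.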
+ // Order: Asc
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 2);
+ assertOrderedSearchHits(searchResponse, "1", "2");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+
+ // Order: Desc
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ // The doc with the missing geo point sorts first, which is consistent with 0.20.x
+ assertHitCount(searchResponse, 2);
+ assertOrderedSearchHits(searchResponse, "2", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286d, 10d));
+ }
+
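+ /**
+ * Checks the distance script fields: the arcDistance* variants should agree with
+ * GeoDistance.ARC and the plain distance* variants with GeoDistance.PLANE, in the
+ * default unit, in kilometers and in miles.
+ */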
+ @Test
+ public void distanceScriptTests() throws Exception {
+ double source_lat = 32.798;
+ double source_long = -117.151;
+ double target_lat = 32.81;
+ double target_long = -117.21;
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "TestPosition")
+ .startObject("location").field("lat", source_lat).field("lon", source_long).endObject()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse1 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].arcDistance(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultDistance1 = searchResponse1.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance1, closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.DEFAULT), 0.0001d));
+
+ SearchResponse searchResponse2 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].distance(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultDistance2 = searchResponse2.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance2, closeTo(GeoDistance.PLANE.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.DEFAULT), 0.0001d));
+
+ SearchResponse searchResponse3 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].arcDistanceInKm(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultArcDistance3 = searchResponse3.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultArcDistance3, closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse4 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].distanceInKm(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultDistance4 = searchResponse4.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance4, closeTo(GeoDistance.PLANE.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
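+ // Longitudes shifted by +360 degrees should be normalized back onto the
+ // [-180, 180] range, yielding the same arc distance as the unshifted point.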
+ SearchResponse searchResponse5 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].arcDistanceInKm(" + (target_lat) + "," + (target_long + 360) + ")").execute().actionGet();
+ Double resultArcDistance5 = searchResponse5.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultArcDistance5, closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
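+ // The same normalization applies to latitudes shifted by +360 degrees.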
+ SearchResponse searchResponse6 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].arcDistanceInKm(" + (target_lat + 360) + "," + (target_long) + ")").execute().actionGet();
+ Double resultArcDistance6 = searchResponse6.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultArcDistance6, closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse7 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].arcDistanceInMiles(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultDistance7 = searchResponse7.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance7, closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.MILES), 0.0001d));
+
+ SearchResponse searchResponse8 = client().prepareSearch().addField("_source").addScriptField("distance", "doc['location'].distanceInMiles(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultDistance8 = searchResponse8.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance8, closeTo(GeoDistance.PLANE.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.MILES), 0.0001d));
+
+ }
+
+ @Test
+ public void testDistanceSortingNestedFields() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("company")
+ .startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .startObject("branches")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .startObject("location").field("type", "geo_point").field("lat_lon", true)
+ .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().prepareCreate("companies")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("company", mapping)
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth("companies").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ indexRandom(true, client().prepareIndex("companies", "company", "1").setSource(jsonBuilder().startObject()
+ .field("name", "company 1")
+ .startArray("branches")
+ .startObject()
+ .field("name", "New York")
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("companies", "company", "2").setSource(jsonBuilder().startObject()
+ .field("name", "company 2")
+ .startArray("branches")
+ .startObject()
+ .field("name", "Times Square")
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject() // to NY: 5.286 km
+ .endObject()
+ .startObject()
+ .field("name", "Tribeca")
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject() // to NY: 0.4621 km
+ .endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("companies", "company", "3").setSource(jsonBuilder().startObject()
+ .field("name", "company 3")
+ .startArray("branches")
+ .startObject()
+ .field("name", "Wall Street")
+ .startObject("location").field("lat", 40.7051157).field("lon", -74.0088305).endObject() // to NY: 1.055 km
+ .endObject()
+ .startObject()
+ .field("name", "Soho")
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject() // to NY: 1.258 km
+ .endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("companies", "company", "4").setSource(jsonBuilder().startObject()
+ .field("name", "company 4")
+ .startArray("branches")
+ .startObject()
+ .field("name", "Greenwich Village")
+ .startObject("location").field("lat", 40.731033).field("lon", -73.9962255).endObject() // to NY: 2.029 km
+ .endObject()
+ .startObject()
+ .field("name", "Brooklyn")
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject() // to NY: 8.572 km
+ .endObject()
+ .endArray()
+ .endObject()));
+
+ // Order: Asc
+ SearchResponse searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "2", "3", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+
+ // Order: Asc, Mode: max
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max"))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+
+ // Order: Desc
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ // Order: Desc, Mode: min
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min"))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "3", "2", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
+
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.geoDistanceSort("branches.location").setNestedPath("branches")
+ .point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.DESC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.geoDistanceSort("branches.location").setNestedFilter(termFilter("branches.name", "brooklyn"))
+ .point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertFirstHit(searchResponse, hasId("4"));
+ assertSearchHits(searchResponse, "1", "2", "3", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+
+ try {
+ client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).sortMode("sum"))
+ .execute().actionGet();
+ fail("Expected error");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+ }
+
+ /**
+ * Regression test for issue 3073 (https://github.com/elasticsearch/elasticsearch/issues/3073)
+ */
+ @Test
+ public void testGeoDistanceFilter() throws IOException {
+ double lat = 40.720611;
+ double lon = -73.998776;
+
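+ // The pin is indexed as a geohash string; a 1m arc-distance filter around the
+ // original coordinates must still match the decoded point.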
+ XContentBuilder mapping = JsonXContent.contentBuilder()
+ .startObject()
+ .startObject("location")
+ .startObject("properties")
+ .startObject("pin")
+ .field("type", "geo_point")
+ .field("geohash", true)
+ .field("geohash_precision", 24)
+ .field("lat_lon", true)
+ .startObject("fielddata")
+ .field("format", randomNumericFieldDataFormat())
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ XContentBuilder source = JsonXContent.contentBuilder()
+ .startObject()
+ .field("pin", GeoHashUtils.encode(lat, lon))
+ .endObject();
+
+ ensureYellow();
+
+ client().admin().indices().prepareCreate("locations").addMapping("location", mapping).execute().actionGet();
+ client().prepareIndex("locations", "location", "1").setCreate(true).setSource(source).execute().actionGet();
+ client().admin().indices().prepareRefresh("locations").execute().actionGet();
+ client().prepareGet("locations", "location", "1").execute().actionGet();
+
+ SearchResponse result = client().prepareSearch("locations")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(FilterBuilders.geoDistanceFilter("pin")
+ .geoDistance(GeoDistance.ARC)
+ .lat(lat).lon(lon)
+ .distance("1m"))
+ .execute().actionGet();
+
+ assertHitCount(result, 1);
+ }
+
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java b/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java
new file mode 100644
index 0000000..efb2e13
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java
@@ -0,0 +1,603 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.distance.DistanceUtils;
+import com.spatial4j.core.exception.InvalidShapeException;
+import com.spatial4j.core.shape.Shape;
+import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
+import org.apache.lucene.spatial.query.SpatialArgs;
+import org.apache.lucene.spatial.query.SpatialOperation;
+import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.geo.builders.MultiPolygonBuilder;
+import org.elasticsearch.common.geo.builders.PolygonBuilder;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+import java.util.Random;
+import java.util.zip.GZIPInputStream;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class GeoFilterTests extends ElasticsearchIntegrationTest {
+
+ private static boolean intersectSupport;
+ private static boolean disjointSupport;
+ private static boolean withinSupport;
+
+ @BeforeClass
+ public static void createNodes() throws Exception {
+ intersectSupport = testRelationSupport(SpatialOperation.Intersects);
+ disjointSupport = testRelationSupport(SpatialOperation.IsDisjointTo);
+ withinSupport = testRelationSupport(SpatialOperation.IsWithin);
+ }
+
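+ // Loads a gzip-compressed classpath resource and returns the decompressed bytes.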
+ private static byte[] unZipData(String path) throws IOException {
+ InputStream is = Streams.class.getResourceAsStream(path);
+ if (is == null) {
+ throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
+ }
+
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ GZIPInputStream in = new GZIPInputStream(is);
+ Streams.copy(in, out);
+
+ is.close();
+ out.close();
+
+ return out.toByteArray();
+ }
+
+ @Test
+ public void testShapeBuilders() {
+
+ try {
+ // self intersection polygon
+ ShapeBuilder.newPolygon()
+ .point(-10, -10)
+ .point(10, 10)
+ .point(-10, 10)
+ .point(10, -10)
+ .close().build();
+ fail("Self intersection not detected");
+ } catch (InvalidShapeException e) {
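+ // expected: the self-intersecting outline must be rejected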
+ }
+
+ // polygon with hole
+ ShapeBuilder.newPolygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .close().close().build();
+
+ try {
+ // polygon with overlapping hole
+ ShapeBuilder.newPolygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 11).point(5, 11).point(5, -5)
+ .close().close().build();
+
+ fail("Self intersection not detected");
+ } catch (InvalidShapeException e) {
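+ // expected: the hole crosses the shell boundary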
+ }
+
+ try {
+ // polygon with intersection holes
+ ShapeBuilder.newPolygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .close()
+ .hole()
+ .point(-5, -6).point(5, -6).point(5, -4).point(-5, -4)
+ .close()
+ .close().build();
+ fail("Intersection of holes not detected");
+ } catch (InvalidShapeException e) {
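+ // expected: the two holes intersect each other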
+ }
+
+ try {
+ // Common line in polygon
+ ShapeBuilder.newPolygon()
+ .point(-10, -10)
+ .point(-10, 10)
+ .point(-5, 10)
+ .point(-5, -5)
+ .point(-5, 20)
+ .point(10, 20)
+ .point(10, -10)
+ .close().build();
+ fail("Self intersection not detected");
+ } catch (InvalidShapeException e) {
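+ // expected: the shared edge makes the polygon self-intersect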
+ }
+
+// Not specified
+// try {
+// // two overlapping polygons within a multipolygon
+// ShapeBuilder.newMultiPolygon()
+// .polygon()
+// .point(-10, -10)
+// .point(-10, 10)
+// .point(10, 10)
+// .point(10, -10)
+// .close()
+// .polygon()
+// .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+// .close().build();
+// fail("Polygon intersection not detected";
+// } catch (InvalidShapeException e) {}
+
+ // Multipolygon: polygon with hole and polygon within the hole
+ ShapeBuilder.newMultiPolygon()
+ .polygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .close()
+ .close()
+ .polygon()
+ .point(-4, -4).point(-4, 4).point(4, 4).point(4, -4)
+ .close()
+ .build();
+
+// Not supported
+// try {
+// // Multipolygon: polygon with hole and polygon within the hole but overlapping
+// ShapeBuilder.newMultiPolygon()
+// .polygon()
+// .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+// .hole()
+// .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+// .close()
+// .close()
+// .polygon()
+// .point(-4, -4).point(-4, 6).point(4, 6).point(4, -4)
+// .close()
+// .build();
+// fail("Polygon intersection not detected";
+// } catch (InvalidShapeException e) {}
+
+ }
+
+ @Test
+ public void testShapeRelations() throws Exception {
+
+ assertTrue( "Intersect relation is not supported", intersectSupport);
+ assertTrue("Disjoint relation is not supported", disjointSupport);
+ assertTrue("within relation is not supported", withinSupport);
+
+
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("polygon")
+ .startObject("properties")
+ .startObject("area")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("store", true)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().string();
+
+ CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("shapes").addMapping("polygon", mapping);
+ mappingRequest.execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ // Create a multipolygon with two polygons. The first is a rectangle of size 10x10
+ // with a hole of size 5x5 equidistant from all sides. This hole in turn contains
+ // the second polygon of size 4x4, also equidistant from all sides.
+ MultiPolygonBuilder polygon = ShapeBuilder.newMultiPolygon()
+ .polygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .close()
+ .close()
+ .polygon()
+ .point(-4, -4).point(-4, 4).point(4, 4).point(4, -4)
+ .close();
+
+ BytesReference data = jsonBuilder().startObject().field("area", polygon).endObject().bytes();
+
+ client().prepareIndex("shapes", "polygon", "1").setSource(data).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // Point in polygon
+ SearchResponse result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(3, 3)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("1"));
+
+ // Point in polygon hole
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(4.5, 4.5)))
+ .execute().actionGet();
+ assertHitCount(result, 0);
+
+ // By definition the border of a polygon belongs to its interior,
+ // so the border of a polygon's hole also belongs to the interior
+ // of the polygon, NOT to the hole.
+
+ // Point on polygon border
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(10.0, 5.0)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("1"));
+
+ // Point on hole border
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(5.0, 2.0)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("1"));
+
+ if (disjointSupport) {
+ // Point not in polygon
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoDisjointFilter("area", ShapeBuilder.newPoint(3, 3)))
+ .execute().actionGet();
+ assertHitCount(result, 0);
+
+ // Point in polygon hole
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoDisjointFilter("area", ShapeBuilder.newPoint(4.5, 4.5)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("1"));
+ }
+
+ // Create a polygon that fills the empty area of the polygon defined above
+ PolygonBuilder inverse = ShapeBuilder.newPolygon()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .hole()
+ .point(-4, -4).point(-4, 4).point(4, 4).point(4, -4)
+ .close()
+ .close();
+
+ data = jsonBuilder().startObject().field("area", inverse).endObject().bytes();
+ client().prepareIndex("shapes", "polygon", "2").setSource(data).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // re-check point on polygon hole
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(4.5, 4.5)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("2"));
+
+ // Create Polygon with hole and common edge
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(10, 5).point(10, -5)
+ .close()
+ .close();
+
+ if (withinSupport) {
+ // Polygon WithIn Polygon
+ builder = ShapeBuilder.newPolygon()
+ .point(-30, -30).point(-30, 30).point(30, 30).point(30, -30).close();
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoWithinFilter("area", builder))
+ .execute().actionGet();
+ assertHitCount(result, 2);
+ }
+
+ // Create a polygon crossing longitude 180.
+ builder = ShapeBuilder.newPolygon()
+ .point(170, -10).point(190, -10).point(190, 10).point(170, 10)
+ .close();
+
+ data = jsonBuilder().startObject().field("area", builder).endObject().bytes();
+ client().prepareIndex("shapes", "polygon", "1").setSource(data).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // Create a polygon crossing longitude 180 with hole.
+ builder = ShapeBuilder.newPolygon()
+ .point(170, -10).point(190, -10).point(190, 10).point(170, 10)
+ .hole().point(175, -5).point(185, -5).point(185, 5).point(175, 5).close()
+ .close();
+
+ data = jsonBuilder().startObject().field("area", builder).endObject().bytes();
+ client().prepareIndex("shapes", "polygon", "1").setSource(data).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(174, -4)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(-174, -4)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(180, -4)))
+ .execute().actionGet();
+ assertHitCount(result, 0);
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.geoIntersectionFilter("area", ShapeBuilder.newPoint(180, -6)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ }
+
+ @Test
+ @Slow
+ public void bulktest() throws Exception {
+ byte[] bulkAction = unZipData("/org/elasticsearch/search/geo/gzippedmap.json");
+
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("country")
+ .startObject("properties")
+ .startObject("pin")
+ .field("type", "geo_point")
+ .field("lat_lon", true)
+ .field("store", true)
+ .endObject()
+ .startObject("location")
+ .field("type", "geo_shape")
+ .field("lat_lon", true)
+ .field("store", true)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ client().admin().indices().prepareCreate("countries").addMapping("country", mapping).execute().actionGet();
+ BulkResponse bulk = client().prepareBulk().add(bulkAction, 0, bulkAction.length, false, null, null).execute().actionGet();
+
+ for (BulkItemResponse item : bulk.getItems()) {
+ assertFalse("unable to index data", item.isFailed());
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ String key = "DE";
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("_id", key))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 1);
+
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.getId(), equalTo(key));
+ }
+
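+ // A bounding box spanning the whole world should match every pin in the bulk data.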
+ SearchResponse world = client().prepareSearch().addField("pin").setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ geoBoundingBoxFilter("pin")
+ .topLeft(90, -179.99999)
+ .bottomRight(-90, 179.99999))
+ ).execute().actionGet();
+
+ assertHitCount(world, 53);
+
+ SearchResponse distance = client().prepareSearch().addField("pin").setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ geoDistanceFilter("pin").distance("425km").point(51.11, 9.851)
+ )).execute().actionGet();
+
+ assertHitCount(distance, 5);
+ GeoPoint point = new GeoPoint();
+ for (SearchHit hit : distance.getHits()) {
+ String name = hit.getId();
+ point.resetFromString(hit.fields().get("pin").getValue().toString());
+ double dist = distance(point.getLat(), point.getLon(), 51.11, 9.851);
+
+ assertThat("distance to '" + name + "'", dist, lessThanOrEqualTo(425000d));
+ assertThat(name, anyOf(equalTo("CZ"), equalTo("DE"), equalTo("BE"), equalTo("NL"), equalTo("LU")));
+ if (key.equals(name)) {
+ assertThat(dist, equalTo(0d));
+ }
+ }
+ }
+
+ @Test
+ public void testGeohashCellFilter() throws IOException {
+ String geohash = randomhash(10);
+ logger.info("Testing geohash_cell filter for [{}]", geohash);
+
+ List<String> neighbors = GeoHashUtils.neighbors(geohash);
+ List<String> parentNeighbors = GeoHashUtils.neighbors(geohash.substring(0, geohash.length() - 1));
+
+ logger.info("Neighbors {}", neighbors);
+ logger.info("Parent Neighbors {}", parentNeighbors);
+
+ ensureYellow();
+
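+ // geohash_prefix=true indexes every prefix of each pin's geohash, which is what
+ // the geohash_cell filter matches against.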
+ client().admin().indices().prepareCreate("locations").addMapping("location", "pin", "type=geo_point,geohash_prefix=true,latlon=false").execute().actionGet();
+
+ // Index a pin
+ client().prepareIndex("locations", "location", "1").setCreate(true).setSource("pin", geohash).execute().actionGet();
+
+ // Index the geohash's neighbors
+ for (int i = 0; i < neighbors.size(); i++) {
+ client().prepareIndex("locations", "location", "N" + i).setCreate(true).setSource("pin", neighbors.get(i)).execute().actionGet();
+ }
+
+ // Index parent cell
+ client().prepareIndex("locations", "location", "p").setCreate(true).setSource("pin", geohash.substring(0, geohash.length() - 1)).execute().actionGet();
+
+ // Index the parent cell's neighbors
+ for (int i = 0; i < parentNeighbors.size(); i++) {
+ client().prepareIndex("locations", "location", "p" + i).setCreate(true).setSource("pin", parentNeighbors.get(i)).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh("locations").execute().actionGet();
+
+ // Result of this geohash search should contain the geohash only
+ SearchResponse results1 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter("{\"geohash_cell\": {\"pin\": \"" + geohash + "\", \"neighbors\": false}}").execute().actionGet();
+ assertHitCount(results1, 1);
+
+ // test the same, just with the builder
+ results1 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(geoHashCellFilter("pin", geohash, false)).execute().actionGet();
+ assertHitCount(results1, 1);
+
+ // Result of the parent query should contain the parent itself, its neighbors, the child cell and all of the child's neighbors
+ SearchResponse results2 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter("{\"geohash_cell\": {\"pin\": \"" + geohash.substring(0, geohash.length() - 1) + "\", \"neighbors\": true}}").execute().actionGet();
+ assertHitCount(results2, 2 + neighbors.size() + parentNeighbors.size());
+
+ // Testing point formats and precision
+ GeoPoint point = GeoHashUtils.decode(geohash);
+ int precision = geohash.length();
+
+ logger.info("Testing lat/lon format");
+ String pointTest1 = "{\"geohash_cell\": {\"pin\": {\"lat\": " + point.lat() + ",\"lon\": " + point.lon() + "},\"precision\": " + precision + ",\"neighbors\": true}}";
+ SearchResponse results3 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(pointTest1).execute().actionGet();
+ assertHitCount(results3, neighbors.size() + 1);
+
+ logger.info("Testing String format");
+ String pointTest2 = "{\"geohash_cell\": {\"pin\": \"" + point.lat() + "," + point.lon() + "\",\"precision\": " + precision + ",\"neighbors\": true}}";
+ SearchResponse results4 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(pointTest2).execute().actionGet();
+ assertHitCount(results4, neighbors.size() + 1);
+
+ logger.info("Testing Array format");
+ String pointTest3 = "{\"geohash_cell\": {\"pin\": [" + point.lon() + "," + point.lat() + "],\"precision\": " + precision + ",\"neighbors\": true}}";
+ SearchResponse results5 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(pointTest3).execute().actionGet();
+ assertHitCount(results5, neighbors.size() + 1);
+ }
+
+ @Test
+ public void testNeighbors() {
+ // Simple root case
+ assertThat(GeoHashUtils.neighbors("7"), containsInAnyOrder("4", "5", "6", "d", "e", "h", "k", "s"));
+
+ // Root cases (Outer cells)
+ assertThat(GeoHashUtils.neighbors("0"), containsInAnyOrder("1", "2", "3", "p", "r"));
+ assertThat(GeoHashUtils.neighbors("b"), containsInAnyOrder("8", "9", "c", "x", "z"));
+ assertThat(GeoHashUtils.neighbors("p"), containsInAnyOrder("n", "q", "r", "0", "2"));
+ assertThat(GeoHashUtils.neighbors("z"), containsInAnyOrder("8", "b", "w", "x", "y"));
+
+ // Root crossing dateline
+ assertThat(GeoHashUtils.neighbors("2"), containsInAnyOrder("0", "1", "3", "8", "9", "p", "r", "x"));
+ assertThat(GeoHashUtils.neighbors("r"), containsInAnyOrder("0", "2", "8", "n", "p", "q", "w", "x"));
+
+ // Level1: simple case
+ assertThat(GeoHashUtils.neighbors("dk"), containsInAnyOrder("d5", "d7", "de", "dh", "dj", "dm", "ds", "dt"));
+
+ // Level1: crossing cells
+ assertThat(GeoHashUtils.neighbors("d5"), containsInAnyOrder("d4", "d6", "d7", "dh", "dk", "9f", "9g", "9u"));
+ assertThat(GeoHashUtils.neighbors("d0"), containsInAnyOrder("d1", "d2", "d3", "9b", "9c", "6p", "6r", "3z"));
+ }
+
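+ // Great-circle distance in meters between two lat/lon points, via the haversine
+ // formula on a sphere of radius GeoUtils.EARTH_SEMI_MAJOR_AXIS.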
+ public static double distance(double lat1, double lon1, double lat2, double lon2) {
+ return GeoUtils.EARTH_SEMI_MAJOR_AXIS * DistanceUtils.distHaversineRAD(
+ DistanceUtils.toRadians(lat1),
+ DistanceUtils.toRadians(lon1),
+ DistanceUtils.toRadians(lat2),
+ DistanceUtils.toRadians(lon2)
+ );
+ }
+
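+ // Probes whether the Lucene spatial strategy can build a filter for the given
+ // relation; returns false when it throws UnsupportedSpatialOperation.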
+ protected static boolean testRelationSupport(SpatialOperation relation) {
+ try {
+ GeohashPrefixTree tree = new GeohashPrefixTree(SpatialContext.GEO, 3);
+ RecursivePrefixTreeStrategy strategy = new RecursivePrefixTreeStrategy(tree, "area");
+ Shape shape = SpatialContext.GEO.makePoint(0, 0);
+ SpatialArgs args = new SpatialArgs(relation, shape);
+ strategy.makeFilter(args);
+ return true;
+ } catch (UnsupportedSpatialOperation e) {
+ return false;
+ }
+ }
+
+ protected static String randomhash(int length) {
+ return randomhash(getRandom(), length);
+ }
+
+ protected static String randomhash(Random random) {
+ return randomhash(random, 2 + random.nextInt(10));
+ }
+
+ protected static String randomhash() {
+ return randomhash(getRandom());
+ }
+
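+ // Builds a random geohash of the given length from the geohash base-32 alphabet
+ // (digits and lowercase letters except a, i, l and o).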
+ protected static String randomhash(Random random, int length) {
+ final char[] BASE_32 = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'b', 'c', 'd', 'e', 'f', 'g',
+ 'h', 'j', 'k', 'm', 'n', 'p', 'q', 'r',
+ 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'};
+
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < length; i++) {
+ sb.append(BASE_32[random.nextInt(BASE_32.length)]);
+ }
+
+ return sb.toString();
+ }
+}
+
diff --git a/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java b/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java
new file mode 100644
index 0000000..31c8788
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java
@@ -0,0 +1,369 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.geo.ShapeRelation;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.geoIntersectionFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+public class GeoShapeIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testNullShape() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .endObject().endObject()
+ .endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "aNullshape").setSource("{\"location\": null}").execute().actionGet();
+ GetResponse result = client().prepareGet("test", "type1", "aNullshape").execute().actionGet();
+ assertThat(result.getField("location"), nullValue());
+ }
+
+ @Test
+ public void testIndexPointsFilterRectangle() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .endObject().endObject()
+ .endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "Document 1")
+ .startObject("location")
+ .field("type", "point")
+ .startArray("coordinates").value(-30).value(-30).endArray()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("name", "Document 2")
+ .startObject("location")
+ .field("type", "point")
+ .startArray("coordinates").value(-45).value(-50).endArray()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ refresh();
+
+ ShapeBuilder shape = ShapeBuilder.newEnvelope().topLeft(-45, 45).bottomRight(45, -45);
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(),
+ geoIntersectionFilter("location", shape)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(geoShapeQuery("location", shape))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ }
+
+ @Test
+ public void testEdgeCases() throws Exception {
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .endObject().endObject()
+ .endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "blakely").setSource(jsonBuilder().startObject()
+ .field("name", "Blakely Island")
+ .startObject("location")
+ .field("type", "polygon")
+ .startArray("coordinates").startArray()
+ .startArray().value(-122.83).value(48.57).endArray()
+ .startArray().value(-122.77).value(48.56).endArray()
+ .startArray().value(-122.79).value(48.53).endArray()
+ .startArray().value(-122.83).value(48.57).endArray() // close the polygon
+ .endArray().endArray()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ ShapeBuilder query = ShapeBuilder.newEnvelope().topLeft(-122.88, 48.62).bottomRight(-122.82, 48.54);
+
+ // This search would fail if both geoshape indexing and geoshape filtering
+ // used the bottom-level optimization in SpatialPrefixTree#recursiveGetNodes.
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(),
+ geoIntersectionFilter("location", query)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("blakely"));
+ }
+
+ @Test
+ public void testIndexedShapeReference() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .endObject().endObject()
+ .endObject().endObject().string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "Document 1")
+ .startObject("location")
+ .field("type", "point")
+ .startArray("coordinates").value(-30).value(-30).endArray()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ refresh();
+
+ ShapeBuilder shape = ShapeBuilder.newEnvelope().topLeft(-45, 45).bottomRight(45, -45);
+ XContentBuilder shapeContent = jsonBuilder().startObject()
+ .field("shape", shape);
+ shapeContent.endObject();
+ createIndex("shapes");
+ ensureGreen();
+ client().prepareIndex("shapes", "shape_type", "Big_Rectangle").setSource(shapeContent).execute().actionGet();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(),
+ geoIntersectionFilter("location", "Big_Rectangle", "shape_type")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(geoShapeQuery("location", "Big_Rectangle", "shape_type"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ }
+
+ @Test
+ public void testReusableBuilder() throws IOException {
+ ShapeBuilder polygon = ShapeBuilder.newPolygon()
+ .point(170, -10).point(190, -10).point(190, 10).point(170, 10)
+ .hole().point(175, -5).point(185, -5).point(185, 5).point(175, 5).close()
+ .close();
+ assertUnmodified(polygon);
+
+ ShapeBuilder linestring = ShapeBuilder.newLineString()
+ .point(170, -10).point(190, -10).point(190, 10).point(170, 10);
+ assertUnmodified(linestring);
+ }
+
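+ // Serializes the builder before and after build() and asserts that building a
+ // shape does not mutate the builder's state.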
+ private void assertUnmodified(ShapeBuilder builder) throws IOException {
+ String before = jsonBuilder().startObject().field("area", builder).endObject().string();
+ builder.build();
+ String after = jsonBuilder().startObject().field("area", builder).endObject().string();
+ assertThat(before, equalTo(after));
+ }
+
+ @Test
+ public void testParsingMultipleShapes() throws IOException {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("location1")
+ .field("type", "geo_shape")
+ .endObject()
+ .startObject("location2")
+ .field("type", "geo_shape")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ ensureYellow();
+
+ String p1 = "\"location1\" : {\"type\":\"polygon\", \"coordinates\":[[[-10,-10],[10,-10],[10,10],[-10,10],[-10,-10]]]}";
+ String p2 = "\"location2\" : {\"type\":\"polygon\", \"coordinates\":[[[-20,-20],[20,-20],[20,20],[-20,20],[-20,-20]]]}";
+ String o1 = "{" + p1 + ", " + p2 + "}";
+
+ client().prepareIndex("test", "type1", "1").setSource(o1).execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
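+ // Look up the shape stored under "location2" of the document just indexed,
+ // instead of inlining the coordinates in the filter.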
+ String filter = "{\"geo_shape\": {\"location2\": {\"indexed_shape\": {"
+ + "\"id\": \"1\","
+ + "\"type\": \"type1\","
+ + "\"index\": \"test\","
+ + "\"path\": \"location2\""
+ + "}}}}";
+
+ SearchResponse result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(filter).execute().actionGet();
+ assertHitCount(result, 1);
+ }
+
+ @Test
+ public void testShapeFetching_path() throws IOException {
+ prepareCreate("shapes").execute().actionGet();
+ prepareCreate("test").addMapping("type", "location", "type=geo_shape").execute().actionGet();
+ String location = "\"location\" : {\"type\":\"polygon\", \"coordinates\":[[[-10,-10],[10,-10],[10,10],[-10,10],[-10,-10]]]}";
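+        // Nest the same polygon at several depths so indexedShapePath can be resolved at "location", "1.location", "1.2.location" and "1.2.3.location"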
+ client().prepareIndex("shapes", "type", "1")
+ .setSource(
+ String.format(
+ Locale.ROOT, "{ %s, \"1\" : { %s, \"2\" : { %s, \"3\" : { %s } }} }", location, location, location, location
+ )
+ ).get();
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().startObject("location")
+ .field("type", "polygon")
+ .startArray("coordinates").startArray()
+ .startArray().value(-20).value(-20).endArray()
+ .startArray().value(20).value(-20).endArray()
+ .startArray().value(20).value(20).endArray()
+ .startArray().value(-20).value(20).endArray()
+ .startArray().value(-20).value(-20).endArray()
+ .endArray().endArray()
+ .endObject().endObject()).get();
+ client().admin().indices().prepareRefresh("test", "shapes").execute().actionGet();
+
+ GeoShapeFilterBuilder filter = FilterBuilders.geoShapeFilter("location", "1", "type", ShapeRelation.INTERSECTS)
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("location");
+ SearchResponse result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertHitCount(result, 1);
+ filter = FilterBuilders.geoShapeFilter("location", "1", "type", ShapeRelation.INTERSECTS)
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.location");
+ result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertHitCount(result, 1);
+ filter = FilterBuilders.geoShapeFilter("location", "1", "type", ShapeRelation.INTERSECTS)
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.2.location");
+ result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertHitCount(result, 1);
+ filter = FilterBuilders.geoShapeFilter("location", "1", "type", ShapeRelation.INTERSECTS)
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.2.3.location");
+ result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertHitCount(result, 1);
+
+ // now test the query variant
+ GeoShapeQueryBuilder query = QueryBuilders.geoShapeQuery("location", "1", "type")
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("location");
+ result = client().prepareSearch("test").setQuery(query).get();
+ assertHitCount(result, 1);
+ query = QueryBuilders.geoShapeQuery("location", "1", "type")
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.location");
+ result = client().prepareSearch("test").setQuery(query).get();
+ assertHitCount(result, 1);
+ query = QueryBuilders.geoShapeQuery("location", "1", "type")
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.2.location");
+ result = client().prepareSearch("test").setQuery(query).get();
+ assertHitCount(result, 1);
+ query = QueryBuilders.geoShapeQuery("location", "1", "type")
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.2.3.location");
+ result = client().prepareSearch("test").setQuery(query).get();
+ assertHitCount(result, 1);
+ }
+
+ @Test // Issue 2944
+ public void testThatShapeIsReturnedEvenWhenExclusionsAreSet() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .endObject().endObject()
+ .startObject("_source")
+ .startArray("excludes").value("nonExistingField").endArray()
+ .endObject()
+ .endObject().endObject()
+ .string();
+ prepareCreate("test").addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
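+        // An envelope is specified by two coordinate pairs: top-left and bottom-right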
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "Document 1")
+ .startObject("location")
+ .field("type", "envelope")
+ .startArray("coordinates").startArray().value(-45.0).value(45).endArray().startArray().value(45).value(-45).endArray().endArray()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
+
+ Map<String, Object> indexedMap = searchResponse.getHits().getAt(0).sourceAsMap();
+ assertThat(indexedMap.get("location"), instanceOf(Map.class));
+ Map<String, Object> locationMap = (Map<String, Object>) indexedMap.get("location");
+ assertThat(locationMap.get("coordinates"), instanceOf(List.class));
+ List<List<Number>> coordinates = (List<List<Number>>) locationMap.get("coordinates");
+ assertThat(coordinates.size(), equalTo(2));
+ assertThat(coordinates.get(0).size(), equalTo(2));
+ assertThat(coordinates.get(0).get(0).doubleValue(), equalTo(-45.0));
+ assertThat(coordinates.get(0).get(1).doubleValue(), equalTo(45.0));
+ assertThat(coordinates.get(1).size(), equalTo(2));
+ assertThat(coordinates.get(1).get(0).doubleValue(), equalTo(45.0));
+ assertThat(coordinates.get(1).get(1).doubleValue(), equalTo(-45.0));
+ assertThat(locationMap.size(), equalTo(2));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/geo/gzippedmap.json b/src/test/java/org/elasticsearch/search/geo/gzippedmap.json
new file mode 100644
index 0000000..f77bdb8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/geo/gzippedmap.json
Binary files differ
diff --git a/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java b/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java
new file mode 100644
index 0000000..f1a0b7c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * totally dumb highlighter used to test the pluggable highlighting functionality
+ */
+public class CustomHighlighter implements Highlighter {
+
+ @Override
+ public String[] names() {
+ return new String[] { "test-custom" };
+ }
+
+ @Override
+ public HighlightField highlight(HighlighterContext highlighterContext) {
+ SearchContextHighlight.Field field = highlighterContext.field;
+
+ List<Text> responses = Lists.newArrayList();
+ responses.add(new StringText("standard response"));
+
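+        // Echo each per-field option back as an extra fragment so tests can verify options are propagated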
+ if (field.fieldOptions().options() != null) {
+ for (Map.Entry<String, Object> entry : field.fieldOptions().options().entrySet()) {
+ responses.add(new StringText("field:" + entry.getKey() + ":" + entry.getValue()));
+ }
+ }
+
+ return new HighlightField(highlighterContext.fieldName, responses.toArray(new Text[]{}));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterPlugin.java b/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterPlugin.java
new file mode 100644
index 0000000..a3e327b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterPlugin.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.highlight;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+public class CustomHighlighterPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test-plugin-custom-highlighter";
+ }
+
+ @Override
+ public String description() {
+ return "Custom highlighter to test pluggable implementation";
+ }
+
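+    // Invoked by the plugin infrastructure when the HighlightModule is created; registers the custom highlighter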
+ public void onModule(HighlightModule highlightModule) {
+ highlightModule.registerHighlighter(CustomHighlighter.class);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchTests.java b/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchTests.java
new file mode 100644
index 0000000..a123003
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchTests.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHighlight;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = Scope.SUITE, numNodes = 1)
+public class CustomHighlighterSearchTests extends ElasticsearchIntegrationTest {
+
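+    // Every node in the suite-scoped cluster loads the custom highlighter plugin via plugin.types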
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put("plugin.types", CustomHighlighterPlugin.class.getName())
+ .put(super.nodeSettings(nodeOrdinal))
+ .build();
+ }
+
+ @Before
+    protected void setup() throws Exception {
+ client().prepareIndex("test", "test", "1").setSource(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("name", "arbitrary content")
+ .endObject())
+ .setRefresh(true).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ }
+
+ @Test
+ public void testThatCustomHighlightersAreSupported() throws IOException {
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("test")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addHighlightedField("name").setHighlighterType("test-custom")
+ .execute().actionGet();
+ assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response"));
+ }
+
+ @Test
+ public void testThatCustomHighlighterCanBeConfiguredPerField() throws Exception {
+ HighlightBuilder.Field highlightConfig = new HighlightBuilder.Field("name");
+ highlightConfig.highlighterType("test-custom");
+ Map<String, Object> options = Maps.newHashMap();
+ options.put("myFieldOption", "someValue");
+ highlightConfig.options(options);
+
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("test")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addHighlightedField(highlightConfig)
+ .execute().actionGet();
+
+ assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response"));
+ assertHighlight(searchResponse, 0, "name", 1, equalTo("field:myFieldOption:someValue"));
+ }
+
+ @Test
+ public void testThatCustomHighlighterCanBeConfiguredGlobally() throws Exception {
+ Map<String, Object> options = Maps.newHashMap();
+ options.put("myGlobalOption", "someValue");
+
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("test")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setHighlighterOptions(options)
+ .setHighlighterType("test-custom")
+ .addHighlightedField("name")
+ .execute().actionGet();
+
+ assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response"));
+ assertHighlight(searchResponse, 0, "name", 1, equalTo("field:myGlobalOption:someValue"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java b/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java
new file mode 100644
index 0000000..c4398d9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java
@@ -0,0 +1,2703 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Iterables;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.index.query.MatchQueryBuilder.Operator;
+import org.elasticsearch.index.query.MatchQueryBuilder.Type;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.highlight.HighlightBuilder.Field;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH;
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.highlight;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
+import static org.hamcrest.Matchers.*;
+
+public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ // see #3486
+ public void testHighTermFrequencyDoc() throws ElasticsearchException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("test", "name", "type=string,term_vector=with_positions_offsets,store=" + (randomBoolean() ? "yes" : "no"))
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", between(1, 5))));
+ ensureYellow();
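+        // Build a document that repeats the same term 6000 times to get an extremely high term frequency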
+ StringBuilder builder = new StringBuilder();
+ for (int i = 0; i < 6000; i++) {
+ builder.append("abc").append(" ");
+ }
+ client().prepareIndex("test", "test", "1")
+ .setSource("name", builder.toString())
+ .get();
+ refresh();
+ SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "abc"))).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, startsWith("<em>abc</em> <em>abc</em> <em>abc</em> <em>abc</em>"));
+ }
+
+ @Test
+ public void testNgramHighlightingWithBrokenPositions() throws ElasticsearchException, IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("test", jsonBuilder()
+ .startObject()
+ .startObject("test")
+ .startObject("properties")
+ .startObject("name")
+ .startObject("fields")
+ .startObject("autocomplete")
+ .field("type", "string")
+ .field("index_analyzer", "autocomplete")
+ .field("search_analyzer", "search_autocomplete")
+ .field("term_vector", "with_positions_offsets")
+ .endObject()
+ .startObject("name")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .field("type", "multi_field")
+ .endObject()
+ .endObject()
+ .endObject())
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("analysis.tokenizer.autocomplete.max_gram", 20)
+ .put("analysis.tokenizer.autocomplete.min_gram", 1)
+ .put("analysis.tokenizer.autocomplete.token_chars", "letter,digit")
+ .put("analysis.tokenizer.autocomplete.type", "nGram")
+ .put("analysis.filter.wordDelimiter.type", "word_delimiter")
+ .putArray("analysis.filter.wordDelimiter.type_table",
+ "& => ALPHANUM", "| => ALPHANUM", "! => ALPHANUM",
+ "? => ALPHANUM", ". => ALPHANUM", "- => ALPHANUM", "# => ALPHANUM", "% => ALPHANUM",
+ "+ => ALPHANUM", ", => ALPHANUM", "~ => ALPHANUM", ": => ALPHANUM", "/ => ALPHANUM",
+ "^ => ALPHANUM", "$ => ALPHANUM", "@ => ALPHANUM", ") => ALPHANUM", "( => ALPHANUM",
+ "] => ALPHANUM", "[ => ALPHANUM", "} => ALPHANUM", "{ => ALPHANUM")
+
+ .put("analysis.filter.wordDelimiter.type.split_on_numerics", false)
+ .put("analysis.filter.wordDelimiter.generate_word_parts", true)
+ .put("analysis.filter.wordDelimiter.generate_number_parts", false)
+ .put("analysis.filter.wordDelimiter.catenate_words", true)
+ .put("analysis.filter.wordDelimiter.catenate_numbers", true)
+ .put("analysis.filter.wordDelimiter.catenate_all", false)
+
+ .put("analysis.analyzer.autocomplete.tokenizer", "autocomplete")
+ .putArray("analysis.analyzer.autocomplete.filter", "lowercase", "wordDelimiter")
+ .put("analysis.analyzer.search_autocomplete.tokenizer", "whitespace")
+ .putArray("analysis.analyzer.search_autocomplete.filter", "lowercase", "wordDelimiter")));
+ ensureYellow();
+ client().prepareIndex("test", "test", "1")
+ .setSource("name", "ARCOTEL Hotels Deutschland").get();
+ refresh();
+ SearchResponse search = client().prepareSearch("test").setTypes("test").setQuery(matchQuery("name.autocomplete", "deut tel").operator(Operator.OR)).addHighlightedField("name.autocomplete").execute().actionGet();
+ assertHighlight(search, 0, "name.autocomplete", 0, equalTo("ARCO<em>TEL</em> Ho<em>tel</em>s <em>Deut</em>schland"));
+ }
+
+ @Test
+ public void testMultiPhraseCutoff() throws ElasticsearchException, IOException {
+ /*
+         * MultiPhraseQuery can literally kill an entire node if it contains too many terms. The
+         * highlighter therefore cuts off and extracts terms once the query exceeds 16 terms.
+ */
+ assertAcked(prepareCreate("test")
+ .addMapping("test", "body", "type=string,index_analyzer=custom_analyzer,search_analyzer=custom_analyzer,term_vector=with_positions_offsets")
+ .setSettings(
+ ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("analysis.filter.wordDelimiter.type", "word_delimiter")
+ .put("analysis.filter.wordDelimiter.type.split_on_numerics", false)
+ .put("analysis.filter.wordDelimiter.generate_word_parts", true)
+ .put("analysis.filter.wordDelimiter.generate_number_parts", true)
+ .put("analysis.filter.wordDelimiter.catenate_words", true)
+ .put("analysis.filter.wordDelimiter.catenate_numbers", true)
+ .put("analysis.filter.wordDelimiter.catenate_all", false)
+ .put("analysis.analyzer.custom_analyzer.tokenizer", "whitespace")
+ .putArray("analysis.analyzer.custom_analyzer.filter", "lowercase", "wordDelimiter"))
+ );
+
+ ensureGreen();
+ client().prepareIndex("test", "test", "1")
+ .setSource("body", "Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature")
+ .get();
+ refresh();
+ SearchResponse search = client().prepareSearch().setQuery(matchQuery("body", "Test: http://www.facebook.com ").type(Type.PHRASE)).addHighlightedField("body").execute().actionGet();
+ assertHighlight(search, 0, "body", 0, startsWith("<em>Test: http://www.facebook.com</em>"));
+ search = client().prepareSearch().setQuery(matchQuery("body", "Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature").type(Type.PHRASE)).addHighlightedField("body").execute().actionGet();
+ assertHighlight(search, 0, "body", 0, equalTo("<em>Test</em>: <em>http</em>://<em>www</em>.<em>facebook</em>.<em>com</em> <em>http</em>://<em>elasticsearch</em>.<em>org</em> <em>http</em>://<em>xing</em>.<em>com</em> <em>http</em>://<em>cnn</em>.<em>com</em> <em>http</em>://<em>quora</em>.com"));
+ }
+
+ @Test
+ public void testNgramHighlightingPreLucene42() throws ElasticsearchException, IOException {
+
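+        // The ngram filter/tokenizer are pinned to version 4.1 below to exercise pre-Lucene-4.2 ngram behavior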
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("test",
+ "name", "type=string,index_analyzer=name_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets",
+ "name2", "type=string,index_analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("analysis.filter.my_ngram.max_gram", 20)
+ .put("analysis.filter.my_ngram.version", "4.1")
+ .put("analysis.filter.my_ngram.min_gram", 1)
+ .put("analysis.filter.my_ngram.type", "ngram")
+ .put("analysis.tokenizer.my_ngramt.max_gram", 20)
+ .put("analysis.tokenizer.my_ngramt.version", "4.1")
+ .put("analysis.tokenizer.my_ngramt.min_gram", 1)
+ .put("analysis.tokenizer.my_ngramt.type", "ngram")
+ .put("analysis.analyzer.name_index_analyzer.tokenizer", "my_ngramt")
+ .put("analysis.analyzer.name2_index_analyzer.tokenizer", "whitespace")
+ .putArray("analysis.analyzer.name2_index_analyzer.filter", "lowercase", "my_ngram")
+ .put("analysis.analyzer.name_search_analyzer.tokenizer", "whitespace")
+ .put("analysis.analyzer.name_search_analyzer.filter", "lowercase")));
+ ensureYellow();
+ client().prepareIndex("test", "test", "1")
+ .setSource("name", "logicacmg ehemals avinci - the know how company",
+ "name2", "logicacmg ehemals avinci - the know how company").get();
+ client().prepareIndex("test", "test", "2")
+ .setSource("name", "avinci, unilog avinci, logicacmg, logica",
+ "name2", "avinci, unilog avinci, logicacmg, logica").get();
+ refresh();
+
+ SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica m"))).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"));
+ assertHighlight(search, 1, "name", 0, equalTo("avinci, unilog avinci, <em>logica</em>c<em>m</em>g, <em>logica</em>"));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica ma"))).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"));
+ assertHighlight(search, 1, "name", 0, equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>"));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica"))).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehemals avinci - the know how company"));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica m"))).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"));
+ assertHighlight(search, 1, "name2", 0, equalTo("avinci, unilog avinci, <em>logica</em>c<em>m</em>g, <em>logica</em>"));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica ma"))).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"));
+ assertHighlight(search, 1, "name2", 0, equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>"));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica"))).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logica</em>cmg ehemals avinci - the know how company"));
+ assertHighlight(search, 1, "name2", 0, equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>"));
+
+ }
+
+ @Test
+ public void testNgramHighlighting() throws ElasticsearchException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("test",
+ "name", "type=string,index_analyzer=name_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets",
+ "name2", "type=string,index_analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets")
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("analysis.filter.my_ngram.max_gram", 20)
+ .put("analysis.filter.my_ngram.min_gram", 1)
+ .put("analysis.filter.my_ngram.type", "ngram")
+ .put("analysis.tokenizer.my_ngramt.max_gram", 20)
+ .put("analysis.tokenizer.my_ngramt.min_gram", 1)
+ .put("analysis.tokenizer.my_ngramt.token_chars", "letter,digit")
+ .put("analysis.tokenizer.my_ngramt.type", "ngram")
+ .put("analysis.analyzer.name_index_analyzer.tokenizer", "my_ngramt")
+ .put("analysis.analyzer.name2_index_analyzer.tokenizer", "whitespace")
+ .put("analysis.analyzer.name2_index_analyzer.filter", "my_ngram")
+ .put("analysis.analyzer.name_search_analyzer.tokenizer", "whitespace")));
+ client().prepareIndex("test", "test", "1")
+ .setSource("name", "logicacmg ehemals avinci - the know how company",
+ "name2", "logicacmg ehemals avinci - the know how company").get();
+ refresh();
+ ensureGreen();
+ SearchResponse search = client().prepareSearch().setQuery(matchQuery("name", "logica m")).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name", "logica ma")).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name", "logica")).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehemals avinci - the know how company"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name2", "logica m")).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> <em>ehemals</em> avinci - the know how <em>company</em>"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name2", "logica ma")).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> <em>ehemals</em> avinci - the know how company"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name2", "logica")).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> ehemals avinci - the know how company"));
+ }
+
+ @Test
+ public void testEnsureNoNegativeOffsets() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1",
+ "no_long_term", "type=string,term_vector=with_positions_offsets",
+ "long_term", "type=string,term_vector=with_positions_offsets"));
+ ensureYellow();
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource("no_long_term", "This is a test where foo is highlighed and should be highlighted",
+ "long_term", "This is a test thisisaverylongwordandmakessurethisfails where foo is highlighed and should be highlighted")
+ .get();
+ refresh();
+
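+        // The requested fragment size (18) is smaller than the 40-character term; the highlighter must clamp rather than emit negative offsets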
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("long_term", "thisisaverylongwordandmakessurethisfails foo highlighed"))
+ .addHighlightedField("long_term", 18, 1)
+ .get();
+ assertHighlight(search, 0, "long_term", 0, 1, equalTo("<em>thisisaverylongwordandmakessurethisfails</em>"));
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("no_long_term", "test foo highlighed").type(Type.PHRASE).slop(3))
+ .addHighlightedField("no_long_term", 18, 1).setHighlighterPostTags("</b>").setHighlighterPreTags("<b>")
+ .get();
+ assertNotHighlighted(search, 0, "no_long_term");
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("no_long_term", "test foo highlighed").type(Type.PHRASE).slop(3))
+ .addHighlightedField("no_long_term", 30, 1).setHighlighterPostTags("</b>").setHighlighterPreTags("<b>")
+ .get();
+
+ assertHighlight(search, 0, "no_long_term", 0, 1, equalTo("a <b>test</b> where <b>foo</b> is <b>highlighed</b> and"));
+ }
+
+ @Test
+ public void testSourceLookupHighlightingUsingPlainHighlighter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+                        // we don't store title and don't use term vectors; now let's see if it works...
+ .startObject("title").field("type", "string").field("store", "no").field("term_vector", "no").endObject()
+ .startObject("attachments").startObject("properties").startObject("body").field("type", "string").field("store", "no").field("term_vector", "no").endObject().endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject()
+ .field("title", "This is a test on the highlighting bug present in elasticsearch")
+ .startArray("attachments").startObject().field("body", "attachment 1").endObject().startObject().field("body", "attachment 2").endObject().endArray()
+ .endObject());
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", -1, 0)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("attachments.body", "attachment"))
+ .addHighlightedField("attachments.body", -1, 0)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "attachments.body", 0, equalTo("<em>attachment</em> 1"));
+ assertHighlight(search, i, "attachments.body", 1, equalTo("<em>attachment</em> 2"));
+ }
+ }
+
+ @Test
+ public void testSourceLookupHighlightingUsingFastVectorHighlighter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+                        // we don't store title; now let's see if it works...
+ .startObject("title").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").endObject()
+ .startObject("attachments").startObject("properties").startObject("body").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").endObject().endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject()
+ .field("title", "This is a test on the highlighting bug present in elasticsearch")
+ .startArray("attachments").startObject().field("body", "attachment 1").endObject().startObject().field("body", "attachment 2").endObject().endArray()
+ .endObject());
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", -1, 0)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("attachments.body", "attachment"))
+ .addHighlightedField("attachments.body", -1, 2)
+ .execute().get();
+
+ for (int i = 0; i < 5; i++) {
+ assertHighlight(search, i, "attachments.body", 0, equalTo("<em>attachment</em> 1"));
+ assertHighlight(search, i, "attachments.body", 1, equalTo("<em>attachment</em> 2"));
+ }
+ }
+
+ @Test
+ public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+                        // we don't store title; now let's see if it works...
+ .startObject("title").field("type", "string").field("store", "no").field("index_options", "offsets").endObject()
+ .startObject("attachments").startObject("properties").startObject("body").field("type", "string").field("store", "no").field("index_options", "offsets").endObject().endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject()
+ .array("title", "This is a test on the highlighting bug present in elasticsearch. Hopefully it works.",
+ "This is the second bug to perform highlighting on.")
+ .startArray("attachments").startObject().field("body", "attachment for this test").endObject().startObject().field("body", "attachment 2").endObject().endArray()
+ .endObject());
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+                // asking for the whole field to be highlighted
+ .addHighlightedField("title", -1, 0).get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch. Hopefully it works."));
+ assertHighlight(search, i, "title", 1, 2, equalTo("This is the second <em>bug</em> to perform highlighting on."));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+                // sentences will be generated out of each value
+ .addHighlightedField("title").get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch."));
+ assertHighlight(search, i, "title", 1, 2, equalTo("This is the second <em>bug</em> to perform highlighting on."));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("attachments.body", "attachment"))
+ .addHighlightedField("attachments.body", -1, 2)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "attachments.body", 0, equalTo("<em>attachment</em> for this test"));
+ assertHighlight(search, i, "attachments.body", 1, 2, equalTo("<em>attachment</em> 2"));
+ }
+ }
+
+ @Test
+ public void testHighlightIssue1994() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=no", "titleTV", "type=string,store=no,term_vector=with_positions_offsets"));
+ ensureYellow();
+
+ indexRandom(false, client().prepareIndex("test", "type1", "1")
+ .setSource("title", new String[]{"This is a test on the highlighting bug present in elasticsearch", "The bug is bugging us"},
+ "titleTV", new String[]{"This is a test on the highlighting bug present in elasticsearch", "The bug is bugging us"}));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "2")
+ .setSource("titleTV", new String[]{"some text to highlight", "highlight other text"}));
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", -1, 2)
+ .addHighlightedField("titleTV", -1, 2)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ assertHighlight(search, 0, "title", 1, 2, equalTo("The <em>bug</em> is bugging us"));
+ assertHighlight(search, 0, "titleTV", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ assertHighlight(search, 0, "titleTV", 1, 2, equalTo("The <em>bug</em> is bugging us"));
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("titleTV", "highlight"))
+ .addHighlightedField("titleTV", -1, 2)
+ .get();
+
+ assertHighlight(search, 0, "titleTV", 0, equalTo("some text to <em>highlight</em>"));
+ assertHighlight(search, 0, "titleTV", 1, 2, equalTo("<em>highlight</em> other text"));
+ }
+
+ @Test
+ public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "this is another test").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1 and field2 produces different tags");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "test"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().order("score").preTags("<global>").postTags("</global>")
+ .field(new HighlightBuilder.Field("field1"))
+ .field(new HighlightBuilder.Field("field2").preTags("<field2>").postTags("</field2>")));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <global>test</global>"));
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("this is another <field2>test</field2>"));
+ }
+
+    @Test // https://github.com/elasticsearch/elasticsearch/issues/5175
+ public void testHighlightingOnWildcardFields() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1",
+ "field-postings", "type=string,index_options=offsets",
+ "field-fvh", "type=string,term_vector=with_positions_offsets",
+ "field-plain", "type=string"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field-postings", "This is the first test sentence. Here is the second one.",
+ "field-fvh", "This is the test with term_vectors",
+ "field-plain", "This is the test for the plain highlighter").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field*");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field-plain", "test"))
+ .highlight(highlight().field("field*").preTags("<xxx>").postTags("</xxx>"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field-postings", 0, 1, equalTo("This is the first <xxx>test</xxx> sentence."));
+ assertHighlight(searchResponse, 0, "field-fvh", 0, 1, equalTo("This is the <xxx>test</xxx> with term_vectors"));
+ assertHighlight(searchResponse, 0, "field-plain", 0, 1, equalTo("This is the <xxx>test</xxx> for the plain highlighter"));
+ }
+
+ @Test
+ public void testForceSourceWithSourceDisabled() throws Exception {
+
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+                        // just to make sure that we hit the stored fields rather than the _source
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").field("index_options", "offsets")
+ .field("term_vector", "with_positions_offsets").endObject()
+ .endObject().endObject().endObject()));
+
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "The quick brown fox jumps over the lazy dog", "field2", "second field content").get();
+ refresh();
+
+        // works using the stored field
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(termQuery("field1", "quick"))
+ .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>"))
+ .get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(termQuery("field1", "quick"))
+ .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>").highlighterType("plain").forceSource(true))
+ .get();
+ assertThat(searchResponse.getFailedShards(), equalTo(1));
+ assertThat(searchResponse.getShardFailures().length, equalTo(1));
+ assertThat(searchResponse.getShardFailures()[0].reason(), containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(termQuery("field1", "quick"))
+ .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>").highlighterType("fvh").forceSource(true))
+ .get();
+ assertThat(searchResponse.getFailedShards(), equalTo(1));
+ assertThat(searchResponse.getShardFailures().length, equalTo(1));
+ assertThat(searchResponse.getShardFailures()[0].reason(), containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(termQuery("field1", "quick"))
+ .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>").highlighterType("postings").forceSource(true))
+ .get();
+ assertThat(searchResponse.getFailedShards(), equalTo(1));
+ assertThat(searchResponse.getShardFailures().length, equalTo(1));
+ assertThat(searchResponse.getShardFailures()[0].reason(), containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
+
+ SearchSourceBuilder searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick"))
+ .highlight(highlight().forceSource(true).field("field1"));
+ searchResponse = client().search(Requests.searchRequest("test").source(searchSource)).get();
+ assertThat(searchResponse.getFailedShards(), equalTo(1));
+ assertThat(searchResponse.getShardFailures().length, equalTo(1));
+ assertThat(searchResponse.getShardFailures()[0].reason(), containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
+
+ searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick"))
+ .highlight(highlight().forceSource(true).field("field*"));
+ searchResponse = client().search(Requests.searchRequest("test").source(searchSource)).get();
+ assertThat(searchResponse.getFailedShards(), equalTo(1));
+ assertThat(searchResponse.getShardFailures().length, equalTo(1));
+ assertThat(searchResponse.getShardFailures()[0].reason(), matches("source is forced for fields \\[field\\d, field\\d\\] but type \\[type1\\] has disabled _source"));
+ }
+
+ @Test
+ public void testPlainHighlighter() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "test"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field1").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field1");
+ source = searchSource()
+ .query(termQuery("_all", "test"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field1").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(termQuery("_all", "quick"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(prefixQuery("_all", "qui"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ logger.info("--> searching on _all with constant score, highlighting on field2");
+ source = searchSource()
+ .query(constantScoreQuery(prefixQuery("_all", "qui")))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ logger.info("--> searching on _all with constant score, highlighting on field2");
+ source = searchSource()
+ .query(boolQuery().should(constantScoreQuery(prefixQuery("_all", "qui"))))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testFastVectorHighlighter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "test"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field1", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field1");
+ source = searchSource()
+ .query(termQuery("_all", "test"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field1", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+        // LUCENE 3.1 UPGRADE: the upgrade caused a trailing space to be added at the end...
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(termQuery("_all", "quick"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+        // LUCENE 3.1 UPGRADE: the upgrade caused a trailing space to be added at the end...
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(prefixQuery("_all", "qui"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+        // LUCENE 3.1 UPGRADE: the upgrade caused a trailing space to be added at the end...
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+ }
+
+ /**
+ * The FHV can spend a long time highlighting degenerate documents if phraseLimit is not set.
+ */
+ @Test(timeout=120000)
+ public void testFVHManyMatches() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+
+ // Index one megabyte of "t " over and over and over again
+ client().prepareIndex("test", "type1")
+ .setSource("field1", Joiner.on("").join(Iterables.limit(Iterables.cycle("t "), 1024*256))).get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "t"))
+ .highlight(highlight().highlighterType("fvh").field("field1", 20, 1).order("score").preTags("<xxx>").postTags("</xxx>"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, containsString("<xxx>t</xxx>"));
+ logger.info("--> done");
+ }
+
+ @Test
+ public void testMatchedFieldsFvhRequireFieldMatch() throws Exception {
+ checkMatchedFieldsCase(true);
+ }
+
+ @Test
+ public void testMatchedFieldsFvhNoRequireFieldMatch() throws Exception {
+ checkMatchedFieldsCase(false);
+ }
+
+ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("termVector", "with_positions_offsets")
+ .field("store", "yes")
+ .field("analyzer", "english")
+ .endObject()
+ .startObject("plain")
+ .field("type", "string")
+ .field("termVector", "with_positions_offsets")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("bar")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("bar")
+ .field("type", "string")
+ .field("termVector", "with_positions_offsets")
+ .field("store", "yes")
+ .field("analyzer", "english")
+ .endObject()
+ .startObject("plain")
+ .field("type", "string")
+ .field("termVector", "with_positions_offsets")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ ensureGreen();
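+        // "foo" and "bar" are multi_fields: a stored, english-analyzed main field plus an unstored, standard-analyzed "plain" subfield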
+
+ index("test", "type1", "1",
+ "foo", "running with scissors");
+ index("test", "type1", "2",
+ "foo", "cat cat junk junk junk junk junk junk junk cats junk junk",
+ "bar", "cat cat junk junk junk junk junk junk junk cats junk junk");
+ index("test", "type1", "3",
+ "foo", "weird",
+ "bar", "result");
+ refresh();
+
+ Field fooField = new Field("foo").numOfFragments(1).order("score").fragmentSize(25)
+ .highlighterType("fvh").requireFieldMatch(requireFieldMatch);
+ Field barField = new Field("bar").numOfFragments(1).order("score").fragmentSize(25)
+ .highlighterType("fvh").requireFieldMatch(requireFieldMatch);
+ SearchRequestBuilder req = client().prepareSearch("test").addHighlightedField(fooField);
+
+ // First check highlighting without any matched fields set
+ SearchResponse resp = req.setQuery(queryString("running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // And that matching a subfield doesn't automatically highlight it
+ resp = req.setQuery(queryString("foo.plain:running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("running with <em>scissors</em>"));
+
+ // Add the subfield to the list of matched fields but don't match it. Everything should still work
+ // like before we added it.
+ fooField.matchedFields("foo", "foo.plain");
+ resp = req.setQuery(queryString("running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // Now make half the matches come from the stored field and half from just a matched field.
+ resp = req.setQuery(queryString("foo.plain:running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // Now remove the stored field from the matched field list. That should work too.
+ fooField.matchedFields("foo.plain");
+ resp = req.setQuery(queryString("foo.plain:running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with scissors"));
+
+ // Now make sure boosted fields don't blow up when matched fields is both the subfield and stored field.
+ fooField.matchedFields("foo", "foo.plain");
+ resp = req.setQuery(queryString("foo.plain:running^5 scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // Now all the matches come from the matched field. This still returns highlighting.
+ resp = req.setQuery(queryString("foo.plain:running foo.plain:scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // And the same when the matched field is queried via the queryString's field parameter, just in case
+ resp = req.setQuery(queryString("running scissors").field("foo.plain")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // Finding the same string two ways is ok too
+ resp = req.setQuery(queryString("run foo.plain:running^5 scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // But we use the best found score when sorting fragments
+ resp = req.setQuery(queryString("cats foo.plain:cats^5").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+
+ // which can also be written by searching on the subfield
+ resp = req.setQuery(queryString("cats").field("foo").field("foo.plain^5")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+
+ // You can also highlight two fields at once, only one of which has matchedFields enabled
+ QueryBuilder twoFieldsQuery = queryString("cats").field("foo").field("foo.plain^5")
+ .field("bar").field("bar.plain^5");
+ resp = req.setQuery(twoFieldsQuery).addHighlightedField(barField).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+ assertHighlight(resp, 0, "bar", 0, equalTo("<em>cat</em> <em>cat</em> junk junk junk junk"));
+
+ // And you can enable matchedField highlighting on both
+ barField.matchedFields("bar", "bar.plain");
+ resp = req.get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+ assertHighlight(resp, 0, "bar", 0, equalTo("junk junk <em>cats</em> junk junk"));
+
+ // Setting a matchedField that isn't searched/doesn't exist is simply ignored.
+ barField.matchedFields("bar", "candy");
+ resp = req.get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+ assertHighlight(resp, 0, "bar", 0, equalTo("<em>cat</em> <em>cat</em> junk junk junk junk"));
+
+ // If the stored field doesn't have a value it doesn't matter what you match, you get nothing.
+ barField.matchedFields("bar", "foo.plain");
+ resp = req.setQuery(queryString("running scissors").field("foo.plain").field("bar")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+ assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar")));
+
+ // If the stored field is found but the matched field isn't then you don't get a result either.
+ fooField.matchedFields("bar.plain");
+ resp = req.setQuery(queryString("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
+ assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("foo")));
+
+ // But if you add the stored field to the list of matched fields then you'll get a result again
+ fooField.matchedFields("foo", "bar.plain");
+ resp = req.setQuery(queryString("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+ assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar")));
+
+ // You _can_ highlight fields that aren't subfields of one another.
+ resp = req.setQuery(queryString("weird").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>weird</em>"));
+ assertHighlight(resp, 0, "bar", 0, equalTo("<em>resul</em>t"));
+
+ // But be careful: it'll blow up if there is a match past the end of the field.
+ resp = req.setQuery(queryString("result").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
+ assertThat("Expected ShardFailures", resp.getShardFailures().length, greaterThan(0));
+ }
+
+ @Test
+ @Slow
+ public void testFastVectorHighlighterManyDocs() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+
+ int COUNT = between(20, 100);
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[COUNT];
+ for (int i = 0; i < COUNT; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field1", "test " + i);
+ }
+ logger.info("--> indexing docs");
+ indexRandom(true, indexRequestBuilders);
+
+ logger.info("--> searching explicitly on field1 and highlighting on it");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSize(COUNT)
+ .setQuery(termQuery("field1", "test"))
+ .addHighlightedField("field1", 100, 0)
+ .get();
+ for (int i = 0; i < COUNT; i++) {
+ SearchHit hit = searchResponse.getHits().getHits()[i];
+ // LUCENE 3.1 UPGRADE: caused a trailing space to be added...
+ assertHighlight(searchResponse, i, "field1", 0, 1, equalTo("<em>test</em> " + hit.id()));
+ }
+
+ logger.info("--> searching explicitly on field1 and highlighting on it, with DFS");
+ searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setSize(COUNT)
+ .setQuery(termQuery("field1", "test"))
+ .addHighlightedField("field1", 100, 0)
+ .get();
+ for (int i = 0; i < COUNT; i++) {
+ SearchHit hit = searchResponse.getHits().getHits()[i];
+ assertHighlight(searchResponse, i, "field1", 0, 1, equalTo("<em>test</em> " + hit.id()));
+ }
+
+ logger.info("--> searching explicitly _all and highlighting on _all");
+ searchResponse = client().prepareSearch()
+ .setSize(COUNT)
+ .setQuery(termQuery("_all", "test"))
+ .addHighlightedField("_all", 100, 0)
+ .get();
+ for (int i = 0; i < COUNT; i++) {
+ SearchHit hit = searchResponse.getHits().getHits()[i];
+ assertHighlight(searchResponse, i, "_all", 0, 1, equalTo("<em>test</em> " + hit.id() + " "));
+ }
+ }
+
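+ // Mapping shared by the FVH tests: term vectors with positions and offsets are enabled on
+ // the stored _all field as well as on field1 and field2.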
+ public XContentBuilder type1TermVectorMapping() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_all").field("store", "yes").field("termVector", "with_positions_offsets").endObject()
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("termVector", "with_positions_offsets").endObject()
+ .startObject("field2").field("type", "string").field("termVector", "with_positions_offsets").endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
+ @Test
+ public void testSameContent() throws Exception {
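+ // Five identical docs across two shards: every hit must yield the same single fragment,
+ // and fragmentSize -1 returns the whole field value.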
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets"));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test on the highlighting bug present in elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", -1, 0)
+ .get();
+
+ for (int i = 0; i < 5; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ }
+ }
+
+ @Test
+ public void testFastVectorHighlighterOffsetParameter() throws Exception {
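+ // The fourth argument to addHighlightedField is the fragment_offset (10 here), which
+ // controls the margin the FVH starts highlighting from, so the fragment below starts
+ // mid-sentence.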
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets").get());
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test on the highlighting bug present in elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", 30, 1, 10)
+ .get();
+
+ for (int i = 0; i < 5; i++) {
+ // LUCENE 3.1 UPGRADE: caused a trailing space to be added...
+ assertHighlight(search, i, "title", 0, 1, equalTo("highlighting <em>bug</em> present in elasticsearch"));
+ }
+ }
+
+ @Test
+ public void testEscapeHtml() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=yes"));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1, 10)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("This is a html escaping highlighting <em>test</em> for *&amp;? elasticsearch"));
+ }
+ }
+
+ @Test
+ public void testEscapeHtml_vector() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets"));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 30, 1, 10)
+ .get();
+
+ for (int i = 0; i < 5; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("highlighting <em>test</em> for *&amp;? elasticsearch"));
+ }
+ }
+
+ @Test
+ public void testMultiMapperVectorWithStore() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "yes").field("term_vector", "with_positions_offsets").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "yes").field("term_vector", "with_positions_offsets").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+ // simple match query on the title field, which is analyzed with the classic analyzer
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title.key
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title.key", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testMultiMapperVectorFromSource() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+
+ // simple match query on the title field, which is analyzed with the classic analyzer
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title.key
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title.key", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testMultiMapperNoVectorWithStore() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "yes").field("term_vector", "no").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "yes").field("term_vector", "no").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+
+ // simple match query on the title field, which is analyzed with the classic analyzer
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title.key
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title.key", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testMultiMapperNoVectorFromSource() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "no").field("term_vector", "no").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "no").field("term_vector", "no").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+
+ // simple match query on the title field, which is analyzed with the classic analyzer
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title.key
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title.key", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testFastVectorHighlighterShouldFailIfNoTermVectors() throws Exception {
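+ // title is mapped with term_vector=no: the default highlighter quietly falls back to the
+ // plain highlighter, but explicitly requesting the FVH must fail on both shards.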
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=no"));
+ ensureGreen();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test for the enabling fast vector highlighter");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "this is a test"))
+ .addHighlightedField("title", 50, 1, 10)
+ .get();
+ assertNoFailures(search);
+
+ search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "this is a test"))
+ .addHighlightedField("title", 50, 1, 10)
+ .setHighlighterType("fast-vector-highlighter")
+ .execute().actionGet();
+ assertThat(search.getFailedShards(), equalTo(2));
+ for (ShardSearchFailure shardSearchFailure : search.getShardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("the field [title] should be indexed with term vector with position offsets to be used with fast vector highlighter"));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "this is a test"))
+ .addHighlightedField("tit*", 50, 1, 10)
+ .setHighlighterType("fast-vector-highlighter")
+ .execute().actionGet();
+ assertThat(search.getFailedShards(), equalTo(2));
+ for (ShardSearchFailure shardSearchFailure : search.getShardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("the field [title] should be indexed with term vector with position offsets to be used with fast vector highlighter"));
+ }
+ }
+
+ @Test
+ public void testDisableFastVectorHighlighter() throws Exception {
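+ // Term vectors are enabled, so the FVH is selected by default; because of SOLR-3724 it
+ // misses these phrase matches, and the test falls back to the plain highlighter instead.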
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets,analyzer=classic"));
+ ensureGreen();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test for the workaround for the fast vector highlighting SOLR-3724");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "test for the workaround"))
+ .addHighlightedField("title", 50, 1, 10)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ // Because of SOLR-3724 nothing is highlighted when FVH is used
+ assertNotHighlighted(search, i, "title");
+ }
+
+ // Using plain highlighter instead of FVH
+ search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "test for the workaround"))
+ .addHighlightedField("title", 50, 1, 10)
+ .setHighlighterType("highlighter")
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("This is a <em>test</em> for the <em>workaround</em> for the fast vector highlighting SOLR-3724"));
+ }
+
+ // Using plain highlighter instead of FVH on the field level
+ search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "test for the workaround"))
+ .addHighlightedField(new HighlightBuilder.Field("title").highlighterType("highlighter"))
+ .setHighlighterType("highlighter")
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("This is a <em>test</em> for the <em>workaround</em> for the fast vector highlighting SOLR-3724"));
+ }
+ }
+
+ @Test
+ public void testFSHHighlightAllMvFragments() throws Exception {
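+ // fragmentSize -1 with numberOfFragments 0 asks the FVH to return each matching value of
+ // the multi-valued field in full.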
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", "tags", "type=string,term_vector=with_positions_offsets"));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1")
+ .setSource("tags", new String[]{
+ "this is a really long tag i would like to highlight",
+ "here is another one that is very long and has the tag token near the end"}).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("tags", "tag"))
+ .addHighlightedField("tags", -1, 0).get();
+
+ assertHighlight(response, 0, "tags", 0, equalTo("this is a really long <em>tag</em> i would like to highlight"));
+ assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long and has the <em>tag</em> token near the end"));
+ }
+
+ @Test
+ public void testBoostingQuery() {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(boostingQuery().positive(termQuery("field2", "brown")).negative(termQuery("field2", "foobar")).negativeBoost(0.5f))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testBoostingQueryTermVector() throws ElasticsearchException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog")
+ .get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(boostingQuery().positive(termQuery("field2", "brown")).negative(termQuery("field2", "foobar")).negativeBoost(0.5f))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().search(
+ searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testCommonTermsQuery() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog")
+ .get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(commonTerms("field2", "quick brown").cutoffFrequency(100))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field2").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testCommonTermsTermVector() throws ElasticsearchException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
+ refresh();
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource().query(commonTerms("field2", "quick brown").cutoffFrequency(100)).from(0).size(60)
+ .explain(true).highlight(highlight().field("field2").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().search(
+ searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testPhrasePrefix() throws ElasticsearchException, IOException {
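+ // Build a custom "synonym" analyzer that rewrites "quick" to "fast" at index time, so
+ // phrase-prefix queries on "fast" should also match documents that contained "quick".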
+ Builder builder = ImmutableSettings.builder();
+ builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace");
+ builder.putArray("index.analysis.analyzer.synonym.filter", "synonym", "lowercase");
+ builder.put("index.analysis.filter.synonym.type", "synonym");
+ builder.putArray("index.analysis.filter.synonym.synonyms", "quick => fast");
+
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(builder.build()).addMapping("type1", type1TermVectorMapping())
+ .addMapping("type2", "_all", "store=yes,termVector=with_positions_offsets",
+ "field4", "type=string,term_vector=with_positions_offsets,analyzer=synonym",
+ "field3", "type=string,analyzer=synonym"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "0")
+ .setSource("field0", "The quick brown fox jumps over the lazy dog", "field1", "The quick brown fox jumps over the lazy dog").get();
+ client().prepareIndex("test", "type1", "1")
+ .setSource("field1", "The quick browse button is a fancy thing, right bro?").get();
+ refresh();
+ logger.info("--> highlighting and searching on field0");
+ SearchSourceBuilder source = searchSource()
+ .query(matchPhrasePrefixQuery("field0", "quick bro"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field0").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
+
+ logger.info("--> highlighting and searching on field1");
+ source = searchSource()
+ .query(matchPhrasePrefixQuery("field1", "quick bro"))
+ .from(0).size(60).explain(true)
+ .highlight(highlight().field("field1").order("score").preTags("<x>").postTags("</x>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <x>quick browse</x> button is a fancy thing, right bro?"));
+ assertHighlight(searchResponse, 1, "field1", 0, 1, equalTo("The <x>quick brown</x> fox jumps over the lazy dog"));
+
+ // with synonyms
+ client().prepareIndex("test", "type2", "0")
+ .setSource("field4", "The quick brown fox jumps over the lazy dog", "field3", "The quick brown fox jumps over the lazy dog").get();
+ client().prepareIndex("test", "type2", "1")
+ .setSource("field4", "The quick browse button is a fancy thing, right bro?").get();
+ client().prepareIndex("test", "type2", "2")
+ .setSource("field4", "a quick fast blue car").get();
+ refresh();
+
+ source = searchSource().postFilter(typeFilter("type2")).query(matchPhrasePrefixQuery("field3", "fast bro")).from(0).size(60).explain(true)
+ .highlight(highlight().field("field3").order("score").preTags("<x>").postTags("</x>"));
+
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field3", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
+
+ logger.info("--> highlighting and searching on field4");
+ source = searchSource().postFilter(typeFilter("type2")).query(matchPhrasePrefixQuery("field4", "the fast bro")).from(0).size(60).explain(true)
+ .highlight(highlight().field("field4").order("score").preTags("<x>").postTags("</x>"));
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field4", 0, 1, equalTo("<x>The quick browse</x> button is a fancy thing, right bro?"));
+ assertHighlight(searchResponse, 1, "field4", 0, 1, equalTo("<x>The quick brown</x> fox jumps over the lazy dog"));
+
+ logger.info("--> highlighting and searching on field4");
+ source = searchSource().postFilter(typeFilter("type2")).query(matchPhrasePrefixQuery("field4", "a fast quick blue ca")).from(0).size(60).explain(true)
+ .highlight(highlight().field("field4").order("score").preTags("<x>").postTags("</x>"));
+ searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field4", 0, 1, equalTo("<x>a quick fast blue car</x>"));
+ }
+
+ @Test
+ public void testPlainHighlightDifferentFragmenter() throws Exception {
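+ // Compares the plain highlighter's "simple" and "span" fragmenters (fragmentSize -1 keeps
+ // values whole) and checks that an unknown fragmenter name is rejected with a 400.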
+ assertAcked(prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", "tags", "type=string"));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject().field("tags",
+ "this is a really long tag i would like to highlight",
+ "here is another one that is very long tag and has the tag token near the end").endObject()).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
+ .addHighlightedField(new HighlightBuilder.Field("tags")
+ .fragmentSize(-1).numOfFragments(2).fragmenter("simple")).get();
+
+ assertHighlight(response, 0, "tags", 0, equalTo("this is a really <em>long</em> <em>tag</em> i would like to highlight"));
+ assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the tag token near the end"));
+
+ response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
+ .addHighlightedField(new HighlightBuilder.Field("tags")
+ .fragmentSize(-1).numOfFragments(2).fragmenter("span")).get();
+
+ assertHighlight(response, 0, "tags", 0, equalTo("this is a really <em>long</em> <em>tag</em> i would like to highlight"));
+ assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the tag token near the end"));
+
+ try {
+ client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
+ .addHighlightedField(new HighlightBuilder.Field("tags")
+ .fragmentSize(-1).numOfFragments(2).fragmenter("invalid")).get();
+ fail("Shouldn't get here");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+ }
+
+ @Test
+ public void testPlainHighlighterMultipleFields() {
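+ // requireFieldMatch(true) limits field1 to matches from the query on field1 itself, while
+ // requireFieldMatch(false) lets field2 highlight the same term even though it wasn't queried.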
+ createIndex("test");
+ ensureGreen();
+
+ index("test", "type1", "1", "field1", "The <b>quick<b> brown fox", "field2", "The <b>slow<b> brown fox");
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("field1", "fox"))
+ .addHighlightedField(new HighlightBuilder.Field("field1").preTags("<1>").postTags("</1>").requireFieldMatch(true))
+ .addHighlightedField(new HighlightBuilder.Field("field2").preTags("<2>").postTags("</2>").requireFieldMatch(false))
+ .get();
+ assertHighlight(response, 0, "field1", 0, 1, equalTo("The <b>quick<b> brown <1>fox</1>"));
+ assertHighlight(response, 0, "field2", 0, 1, equalTo("The <b>slow<b> brown <2>fox</2>"));
+ }
+
+ @Test
+ public void testFastVectorHighlighterMultipleFields() {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,term_vectors=with_positions_offsets", "field2", "type=string,term_vectors=with_positions_offsets"));
+ ensureGreen();
+
+ index("test", "type1", "1", "field1", "The <b>quick<b> brown fox", "field2", "The <b>slow<b> brown fox");
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("field1", "fox"))
+ .addHighlightedField(new HighlightBuilder.Field("field1").preTags("<1>").postTags("</1>").requireFieldMatch(true))
+ .addHighlightedField(new HighlightBuilder.Field("field2").preTags("<2>").postTags("</2>").requireFieldMatch(false))
+ .get();
+ assertHighlight(response, 0, "field1", 0, 1, equalTo("The <b>quick<b> brown <1>fox</1>"));
+ assertHighlight(response, 0, "field2", 0, 1, equalTo("The <b>slow<b> brown <2>fox</2>"));
+ }
+
+ @Test
+ public void testMissingStoredField() throws Exception {
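+ // Regression test: highlighting a mapped stored field that is absent from the document
+ // must return no highlight entry instead of failing the request.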
+ assertAcked(prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", "highlight_field", "type=string,store=yes"));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject()
+ .field("field", "highlight")
+ .endObject()).get();
+ refresh();
+
+ // This query used to fail when the field to highlight was absent
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("field", "highlight").type(MatchQueryBuilder.Type.BOOLEAN))
+ .addHighlightedField(new HighlightBuilder.Field("highlight_field")
+ .fragmentSize(-1).numOfFragments(1).fragmenter("simple")).get();
+ assertThat(response.getHits().hits()[0].highlightFields().isEmpty(), equalTo(true));
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/3211
+ public void testNumericHighlighting() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("test", "text", "type=string,index=analyzed",
+ "byte", "type=byte", "short", "type=short", "int", "type=integer", "long", "type=long",
+ "float", "type=float", "double", "type=double"));
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("text", "elasticsearch test",
+ "byte", 25, "short", 42, "int", 100, "long", -1, "float", 3.2f, "double", 42.42).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("text", "test").type(MatchQueryBuilder.Type.BOOLEAN))
+ .addHighlightedField("text")
+ .addHighlightedField("byte")
+ .addHighlightedField("short")
+ .addHighlightedField("int")
+ .addHighlightedField("long")
+ .addHighlightedField("float")
+ .addHighlightedField("double")
+ .get();
+ // Highlighting of numeric fields is not supported, but it should not raise errors
+ // (this behavior is consistent with version 0.20)
+ assertHitCount(response, 1L);
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/3200
+ public void testResetTwice() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(ImmutableSettings.builder()
+ .put("analysis.analyzer.my_analyzer.type", "pattern")
+ .put("analysis.analyzer.my_analyzer.pattern", "\\s+")
+ .build())
+ .addMapping("type", "text", "type=string,analyzer=my_analyzer"));
+ ensureGreen();
+ client().prepareIndex("test", "type", "1")
+ .setSource("text", "elasticsearch test").get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("text", "test").type(MatchQueryBuilder.Type.BOOLEAN))
+ .addHighlightedField("text").execute().actionGet();
+ // PatternAnalyzer will throw an exception if it is reset twice
+ assertHitCount(response, 1L);
+ }
+
+ @Test
+ public void testHighlightUsesHighlightQuery() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
+ ensureGreen();
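+ // The mapping enables both term vectors and postings offsets, so the very same field can
+ // be highlighted with the plain, fvh and postings highlighters below.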
+
+ index("test", "type1", "1", "text", "Testing the highlight query feature");
+ refresh();
+
+ HighlightBuilder.Field field = new HighlightBuilder.Field("text");
+
+ SearchRequestBuilder search = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("text", "testing"))
+ .addHighlightedField(field);
+ Matcher<String> searchQueryMatcher = equalTo("<em>Testing</em> the highlight query feature");
+
+ field.highlighterType("plain");
+ SearchResponse response = search.get();
+ assertHighlight(response, 0, "text", 0, searchQueryMatcher);
+ field.highlighterType("fvh");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, searchQueryMatcher);
+ field.highlighterType("postings");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, searchQueryMatcher);
+
+ Matcher<String> hlQueryMatcher = equalTo("Testing the highlight <em>query</em> feature");
+ field.highlightQuery(matchQuery("text", "query"));
+
+ field.highlighterType("fvh");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ field.highlighterType("plain");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ field.highlighterType("postings");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ // Make sure the highlightQuery is taken into account when it is set on the highlight context instead of the field
+ search.setHighlighterQuery(matchQuery("text", "query"));
+ field.highlighterType("fvh").highlightQuery(null);
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ field.highlighterType("plain");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ field.highlighterType("postings");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+ }
+
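+ // Randomly adds "store=yes," to the mapping so the tests cover highlighting from stored
+ // fields as well as from _source.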
+ private static String randomStoreField() {
+ if (randomBoolean()) {
+ return "store=yes,";
+ }
+ return "";
+ }
+
+ @Test
+ public void testHighlightNoMatchSize() throws IOException {
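+ // Exercises noMatchSize across all three highlighter types: it controls how much of the
+ // field is returned when nothing in it matches the query.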
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
+ ensureGreen();
+
+ String text = "I am pretty long so some of me should get cut off. Second sentence";
+ index("test", "type1", "1", "text", text);
+ refresh();
+
+ // When you don't set noMatchSize you don't get any results if there isn't anything to highlight.
+ HighlightBuilder.Field field = new HighlightBuilder.Field("text")
+ .fragmentSize(21)
+ .numOfFragments(1)
+ .highlighterType("plain");
+ SearchResponse response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // When noMatchSize is set to 0 you also shouldn't get any highlights
+ field.highlighterType("plain").noMatchSize(0);
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // When noMatchSize is between 0 and the size of the string
+ field.highlighterType("plain").noMatchSize(21);
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so"));
+
+ // The FVH also works, but its fragment is longer than the plain highlighter's because of boundary_max_scan
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some"));
+
+ // Postings hl also works but the fragment is the whole first sentence (size ignored)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // We can also ask for a fragment longer than the input string and get the whole string
+ field.highlighterType("plain").noMatchSize(text.length() * 2);
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+
+ // No difference with the postings highlighter, as the exact noMatchSize is ignored (it just needs to be greater than 0)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // We can also ask for a fragment exactly the size of the input field and get the whole field
+ field.highlighterType("plain").noMatchSize(text.length());
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+
+ // No difference with the postings highlighter, as the exact noMatchSize is ignored (it just needs to be greater than 0)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // You can set noMatchSize globally in the highlighter as well
+ field.highlighterType("plain").noMatchSize(null);
+ response = client().prepareSearch("test").setHighlighterNoMatchSize(21).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so"));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").setHighlighterNoMatchSize(21).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some"));
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").setHighlighterNoMatchSize(21).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // We don't break if noMatchSize is less than zero though
+ field.highlighterType("plain").noMatchSize(randomIntBetween(Integer.MIN_VALUE, -1));
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+ }
+
+ @Test
+ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
+ ensureGreen();
+
+ String text1 = "I am pretty long so some of me should get cut off. We'll see how that goes.";
+ String text2 = "I am short";
+ index("test", "type1", "1", "text", new String[] {text1, text2});
+ refresh();
+
+ // The no match fragment should come from the first value of a multi-valued field
+ HighlightBuilder.Field field = new HighlightBuilder.Field("text")
+ .fragmentSize(21)
+ .numOfFragments(1)
+ .highlighterType("plain")
+ .noMatchSize(21);
+ SearchResponse response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so"));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some"));
+
+ // Postings hl also works but the fragment is the whole first sentence (size ignored)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // And noMatchSize returns nothing when the first entry is an empty string!
+ index("test", "type1", "2", "text", new String[] {"", text2});
+ refresh();
+
+ IdsQueryBuilder idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("2");
+ field.highlighterType("plain");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // But if the field was actually empty then you should get no highlighting field
+ index("test", "type1", "3", "text", new String[] {});
+ refresh();
+ idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("3");
+ field.highlighterType("plain");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // Same for if the field doesn't even exist on the document
+ index("test", "type1", "4");
+ refresh();
+
+ idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("4");
+ field.highlighterType("plain");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "postings");
+
+ // Again same if the field isn't mapped
+ field = new HighlightBuilder.Field("unmapped")
+ .highlighterType("plain")
+ .noMatchSize(21);
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+ }
+
+ @Test
+ public void testHighlightNoMatchSizeNumberOfFragments() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
+ ensureGreen();
+
+ String text1 = "This is the first sentence. This is the second sentence.";
+ String text2 = "This is the third sentence. This is the fourth sentence.";
+ String text3 = "This is the fifth sentence";
+ index("test", "type1", "1", "text", new String[] {text1, text2, text3});
+ refresh();
+
+ // The no match fragment should come from the first value of a multi-valued field
+ HighlightBuilder.Field field = new HighlightBuilder.Field("text")
+ .fragmentSize(1)
+ .numOfFragments(0)
+ .highlighterType("plain")
+ .noMatchSize(20);
+ SearchResponse response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first"));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first sentence"));
+
+ // Postings hl also works but the fragment is the whole first sentence (size ignored)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first sentence."));
+
+ // If there's a match we only return the values containing matches (whole values, since number_of_fragments == 0)
+ MatchQueryBuilder queryBuilder = QueryBuilders.matchQuery("text", "third fifth");
+ field.highlighterType("plain");
+ response = client().prepareSearch("test").setQuery(queryBuilder).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 2, equalTo("This is the <em>third</em> sentence. This is the fourth sentence."));
+ assertHighlight(response, 0, "text", 1, 2, equalTo("This is the <em>fifth</em> sentence"));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").setQuery(queryBuilder).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 2, equalTo("This is the <em>third</em> sentence. This is the fourth sentence."));
+ assertHighlight(response, 0, "text", 1, 2, equalTo("This is the <em>fifth</em> sentence"));
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").setQuery(queryBuilder).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 2, equalTo("This is the <em>third</em> sentence. This is the fourth sentence."));
+ assertHighlight(response, 0, "text", 1, 2, equalTo("This is the <em>fifth</em> sentence"));
+ }
+
+ @Test
+ public void testPostingsHighlighter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
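+ // type1PostingsffsetsMapping (defined elsewhere in this class) presumably indexes the
+ // fields with index_options=offsets, which the postings highlighter requires; it breaks
+ // fragments on sentence boundaries, as the assertions below show.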
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy quick dog").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "test"))
+ .highlight(highlight().field("field1").preTags("<xxx>").postTags("</xxx>"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field1");
+ source = searchSource()
+ .query(termQuery("_all", "test"))
+ .highlight(highlight().field("field1").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(termQuery("_all", "quick"))
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy <xxx>quick</xxx> dog"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(matchPhraseQuery("_all", "quick brown"))
+ .highlight(highlight().field("field2").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ // A phrase query results in highlighting all matching terms regardless of their positions
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> <xxx>brown</xxx> fox jumps over the lazy <xxx>quick</xxx> dog"));
+
+ // Let's fall back to the plain highlighter then, which is what people would do to highlight query matches
+ logger.info("--> searching on _all, highlighting on field2, falling back to the plain highlighter");
+ source = searchSource()
+ .query(matchPhraseQuery("_all", "quick brown"))
+ .highlight(highlight().field("field2").preTags("<xxx>").postTags("</xxx>").highlighterType("highlighter"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> <xxx>brown</xxx> fox jumps over the lazy quick dog"));
+ }
+
+ @Test
+ public void testPostingsHighlighterMultipleFields() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()).get());
+ ensureGreen();
+
+ index("test", "type1", "1", "field1", "The <b>quick<b> brown fox. Second sentence.", "field2", "The <b>slow<b> brown fox. Second sentence.");
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("field1", "fox"))
+ .addHighlightedField(new HighlightBuilder.Field("field1").preTags("<1>").postTags("</1>").requireFieldMatch(true))
+ .addHighlightedField(new HighlightBuilder.Field("field2").preTags("<2>").postTags("</2>").requireFieldMatch(false))
+ .get();
+ assertHighlight(response, 0, "field1", 0, 1, equalTo("The <b>quick<b> brown <1>fox</1>."));
+ assertHighlight(response, 0, "field2", 0, 1, equalTo("The <b>slow<b> brown <2>fox</2>."));
+ }
+
+ @Test
+ public void testPostingsHighlighterNumberOfFragments() throws Exception {
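+ // With numOfFragments(0) the postings highlighter returns whole values: a single-valued
+ // field comes back as one concatenated fragment, a multi-valued field as one fragment per
+ // matching value (see the assertions on hits 1 and 2 below).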
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource("field1", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. The quick brown dog jumps over the lazy fox.",
+ "field2", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. The quick brown dog jumps over the lazy fox.").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "fox"))
+ .highlight(highlight()
+ .field(new HighlightBuilder.Field("field1").numOfFragments(5).preTags("<field1>").postTags("</field1>"))
+ .field(new HighlightBuilder.Field("field2").numOfFragments(2).preTags("<field2>").postTags("</field2>")));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field1", 1, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
+ assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
+
+ assertHighlight(searchResponse, 0, "field2", 0, equalTo("The quick brown <field2>fox</field2> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field2", 1, 2, equalTo("The lazy red <field2>fox</field2> jumps over the quick dog."));
+
+ client().prepareIndex("test", "type1", "2")
+ .setSource("field1", new String[]{"The quick brown fox jumps over the lazy dog. Second sentence not finished", "The lazy red fox jumps over the quick dog.", "The quick brown dog jumps over the lazy fox."}).get();
+ refresh();
+
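+ //with number_of_fragments = 0 the whole content is returned as a single fragment, one per value for multi valued fields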
+ source = searchSource()
+ .query(termQuery("field1", "fox"))
+ .highlight(highlight()
+ .field(new HighlightBuilder.Field("field1").numOfFragments(0).preTags("<field1>").postTags("</field1>")));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHitCount(searchResponse, 2l);
+
+ for (SearchHit searchHit : searchResponse.getHits()) {
+ if ("1".equals(searchHit.id())) {
+ assertHighlight(searchHit, "field1", 0, 1, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog. The lazy red <field1>fox</field1> jumps over the quick dog. The quick brown dog jumps over the lazy <field1>fox</field1>."));
+ } else if ("2".equals(searchHit.id())) {
+ assertHighlight(searchHit, "field1", 0, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog. Second sentence not finished"));
+ assertHighlight(searchHit, "field1", 1, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
+ assertHighlight(searchHit, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
+ } else {
+ fail("Only hits with id 1 and 2 are returned");
+ }
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterRequireFieldMatch() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. The quick brown dog jumps over the lazy fox.",
+ "field2", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. The quick brown dog jumps over the lazy fox.").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "fox"))
+ .highlight(highlight()
+ .field(new HighlightBuilder.Field("field1").requireFieldMatch(true).preTags("<field1>").postTags("</field1>"))
+ .field(new HighlightBuilder.Field("field2").requireFieldMatch(true).preTags("<field2>").postTags("</field2>")));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ //field2 is not highlighted because require_field_match is set to true
+ assertNotHighlighted(searchResponse, 0, "field2");
+ assertHighlight(searchResponse, 0, "field1", 0, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field1", 1, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
+ assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
+
+ logger.info("--> highlighting and searching on field1 and field2 - require field match set to false");
+ source = searchSource()
+ .query(termQuery("field1", "fox"))
+ .highlight(highlight()
+ .field(new HighlightBuilder.Field("field1").requireFieldMatch(false).preTags("<field1>").postTags("</field1>"))
+ .field(new HighlightBuilder.Field("field2").requireFieldMatch(false).preTags("<field2>").postTags("</field2>")));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field1", 1, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
+ assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
+
+ //field2 is now highlighted because require_field_match is set to false
+ assertHighlight(searchResponse, 0, "field2", 0, equalTo("The quick brown <field2>fox</field2> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field2", 1, equalTo("The lazy red <field2>fox</field2> jumps over the quick dog."));
+ assertHighlight(searchResponse, 0, "field2", 2, 3, equalTo("The quick brown dog jumps over the lazy <field2>fox</field2>."));
+
+ logger.info("--> highlighting and searching on field1 and field2 via multi_match query");
+ source = searchSource()
+ .query(multiMatchQuery("fox", "field1", "field2"))
+ .highlight(highlight()
+ .field(new HighlightBuilder.Field("field1").requireFieldMatch(true).preTags("<field1>").postTags("</field1>"))
+ .field(new HighlightBuilder.Field("field2").requireFieldMatch(true).preTags("<field2>").postTags("</field2>")));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHitCount(searchResponse, 1l);
+
+ assertHighlight(searchResponse, 0, "field1", 0, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field1", 1, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
+ assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
+ //field2 is highlighted as well, since the multi_match query matches on both fields
+ assertHighlight(searchResponse, 0, "field2", 0, equalTo("The quick brown <field2>fox</field2> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field2", 1, equalTo("The lazy red <field2>fox</field2> jumps over the quick dog."));
+ assertHighlight(searchResponse, 0, "field2", 2, 3, equalTo("The quick brown dog jumps over the lazy <field2>fox</field2>."));
+ }
+
+ @Test
+ public void testPostingsHighlighterOrderByScore() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", new String[]{"This sentence contains one match, not that short. This sentence contains two sentence matches. This one contains no matches.",
+ "This is the second value's first sentence. This one contains no matches. This sentence contains three sentence occurrences (sentence).",
+ "One sentence match here and scored lower since the text is quite long, not that appealing. This one contains no matches."}).get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "sentence"))
+ .highlight(highlight().field("field1").order("score"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
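+ //fragments are sorted by decreasing score: more matches rank first, and shorter passages beat longer ones with the same number of matches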
+ Map<String,HighlightField> highlightFieldMap = searchResponse.getHits().getAt(0).highlightFields();
+ assertThat(highlightFieldMap.size(), equalTo(1));
+ HighlightField field1 = highlightFieldMap.get("field1");
+ assertThat(field1.fragments().length, equalTo(5));
+ assertThat(field1.fragments()[0].string(), equalTo("This <em>sentence</em> contains three <em>sentence</em> occurrences (<em>sentence</em>)."));
+ assertThat(field1.fragments()[1].string(), equalTo("This <em>sentence</em> contains two <em>sentence</em> matches."));
+ assertThat(field1.fragments()[2].string(), equalTo("This is the second value's first <em>sentence</em>."));
+ assertThat(field1.fragments()[3].string(), equalTo("This <em>sentence</em> contains one match, not that short."));
+ assertThat(field1.fragments()[4].string(), equalTo("One <em>sentence</em> match here and scored lower since the text is quite long, not that appealing."));
+
+ //let's now use number_of_fragments = 0, so that each value is highlighted as a whole without being broken into snippets, while the values are still sorted by score
+ source = searchSource()
+ .query(termQuery("field1", "sentence"))
+ .highlight(highlight().field("field1", -1, 0).order("score"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHighlight(searchResponse, 0, "field1", 0, equalTo("This is the second value's first <em>sentence</em>. This one contains no matches. This <em>sentence</em> contains three <em>sentence</em> occurrences (<em>sentence</em>)."));
+ assertHighlight(searchResponse, 0, "field1", 1, equalTo("This <em>sentence</em> contains one match, not that short. This <em>sentence</em> contains two <em>sentence</em> matches. This one contains no matches."));
+ assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("One <em>sentence</em> match here and scored lower since the text is quite long, not that appealing. This one contains no matches."));
+ }
+
+ @Test
+ public void testPostingsHighlighterEscapeHtml() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", "title", "type=string," + randomStoreField() + "index_options=offsets"));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
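+ //the html encoder escapes the original content (& becomes &amp;) while the highlighting tags stay untouched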
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("title", "test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title").get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(searchResponse, i, "title", 0, 1, equalTo("This is a html escaping highlighting <em>test</em> for *&amp;?"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterMultiMapperWithStore() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ //just to make sure that we hit the stored fields rather than the _source
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "yes").field("index_options", "offsets").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "yes").field("index_options", "offsets").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test . Second sentence.").get();
+ refresh();
+ // simple search on body with standard analyzer with a simple field query
+ SearchResponse searchResponse = client().prepareSearch()
+ //let's make sure the query gets analyzed, so that we highlight the resulting terms
+ .setQuery(matchQuery("title", "This is a Test"))
+ .addHighlightedField("title").get();
+
+ assertHitCount(searchResponse, 1l);
+ SearchHit hit = searchResponse.getHits().getAt(0);
+ assertThat(hit.source(), nullValue());
+ //stopwords are not highlighted since the classic analyzer does not index them
+ assertHighlight(hit, "title", 0, 1, equalTo("this is a <em>test</em> ."));
+
+ // search on title.key and highlight on title
+ searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .addHighlightedField("title.key").get();
+ assertHitCount(searchResponse, 1l);
+
+ //stopwords are now highlighted since only the whitespace analyzer, which keeps them, was used here
+ assertHighlight(searchResponse, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em> ."));
+ }
+
+ @Test
+ public void testPostingsHighlighterMultiMapperFromSource() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "no").field("index_options", "offsets").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "no").field("index_options", "offsets").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+
+ // simple search on body with standard analyzer with a simple field query
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("title")
+ .get();
+
+ assertHighlight(searchResponse, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title.key
+ searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .addHighlightedField("title.key").get();
+
+ assertHighlight(searchResponse, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "string").field("store", "yes").field("index_options", "docs").endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test for the postings highlighter");
+ }
+ indexRandom(true, indexRequestBuilders);
+
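+ //no highlighter type is set here: since the field has no offsets the plain highlighter is used as fallback, so no failures are expected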
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("title")
+ .get();
+ assertNoFailures(search);
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("title")
+ .setHighlighterType("postings-highlighter")
+ .get();
+ assertThat(search.getFailedShards(), equalTo(2));
+ for (ShardSearchFailure shardSearchFailure : search.getShardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("the field [title] should be indexed with positions and offsets in the postings list to be used with postings highlighter"));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("title")
+ .setHighlighterType("postings")
+ .get();
+
+ assertThat(search.getFailedShards(), equalTo(2));
+ for (ShardSearchFailure shardSearchFailure : search.getShardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("the field [title] should be indexed with positions and offsets in the postings list to be used with postings highlighter"));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("tit*")
+ .setHighlighterType("postings")
+ .get();
+
+ assertThat(search.getFailedShards(), equalTo(2));
+ for (ShardSearchFailure shardSearchFailure : search.getShardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("the field [title] should be indexed with positions and offsets in the postings list to be used with postings highlighter"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterBoostingQuery() throws ElasticsearchException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.")
+ .get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(boostingQuery().positive(termQuery("field2", "brown")).negative(termQuery("field2", "foobar")).negativeBoost(0.5f))
+ .highlight(highlight().field("field2").preTags("<x>").postTags("</x>"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog!"));
+ }
+
+ @Test
+ public void testPostingsHighlighterCommonTermsQuery() throws ElasticsearchException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource().query(commonTerms("field2", "quick brown").cutoffFrequency(100))
+ .highlight(highlight().field("field2").preTags("<x>").postTags("</x>"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHitCount(searchResponse, 1l);
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog!"));
+ }
+
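+ //mapping used by the postings highlighter tests: _all is stored, and field1, field2 and _all are indexed with offsets in the postings list, as the postings highlighter requires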
+ public XContentBuilder type1PostingsffsetsMapping() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_all").field("store", "yes").field("index_options", "offsets").endObject()
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("index_options", "offsets").endObject()
+ .startObject("field2").field("type", "string").field("index_options", "offsets").endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
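+ //the multi-term query rewrite methods to cycle through, verifying that highlighting works regardless of which rewrite is used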
+ private static final String[] REWRITE_METHODS = new String[]{"constant_score_auto", "scoring_boolean", "constant_score_boolean",
+ "constant_score_filter", "top_terms_boost_50", "top_terms_50"};
+
+ @Test
+ public void testPostingsHighlighterPrefixQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(prefixQuery("field2", "qui").rewrite(rewriteMethod))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterFuzzyQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ SearchSourceBuilder source = searchSource().query(fuzzyQuery("field2", "quck"))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+
+ @Test
+ public void testPostingsHighlighterRegexpQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(regexpQuery("field2", "qu[a-l]+k").rewrite(rewriteMethod))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterWildcardQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(wildcardQuery("field2", "qui*").rewrite(rewriteMethod))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+
+ source = searchSource().query(wildcardQuery("field2", "qu*k").rewrite(rewriteMethod))
+ .highlight(highlight().field("field2"));
+ searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+ assertHitCount(searchResponse, 1l);
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterTermRangeQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "aaab").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ SearchSourceBuilder source = searchSource().query(rangeQuery("field2").gte("aaaa").lt("zzzz"))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("<em>aaab</em>"));
+ }
+
+ @Test
+ public void testPostingsHighlighterQueryString() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(queryString("qui*").defaultField("field2").rewrite(rewriteMethod))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterRegexpQueryWithinConstantScoreQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(constantScoreQuery(regexpQuery("field1", "pho[a-z]+").rewrite(rewriteMethod)))
+ .highlight(highlight().field("field1"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterMultiTermQueryMultipleLevels() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(boolQuery()
+ .should(constantScoreQuery(FilterBuilders.missingFilter("field1")))
+ .should(matchQuery("field1", "test"))
+ .should(filteredQuery(queryString("field1:photo*").rewrite(rewriteMethod), null)))
+ .highlight(highlight().field("field1"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterPrefixQueryWithinBooleanQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(boolQuery().must(prefixQuery("field1", "photo").rewrite(rewriteMethod)).should(matchQuery("field1", "test").minimumShouldMatch("0")))
+ .highlight(highlight().field("field1"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterQueryStringWithinFilteredQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ for (String rewriteMethod : REWRITE_METHODS) {
+ SearchSourceBuilder source = searchSource().query(filteredQuery(queryString("field1:photo*").rewrite(rewriteMethod), missingFilter("field_null")))
+ .highlight(highlight().field("field1"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)
+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
+ }
+ }
+
+ @Test
+ @Slow
+ public void testPostingsHighlighterManyDocs() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ int COUNT = between(20, 100);
+ Map<String, String> prefixes = new HashMap<String, String>(COUNT);
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[COUNT];
+ for (int i = 0; i < COUNT; i++) {
+ //generate text with the word to highlight at a different position in each doc
+ //(https://github.com/elasticsearch/elasticsearch/issues/4103)
+ String prefix = randomAsciiOfLengthBetween(5, 30);
+ prefixes.put(String.valueOf(i), prefix);
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field1", "Sentence " + prefix
+ + " test. Sentence two.");
+ }
+ logger.info("--> indexing docs");
+ indexRandom(true, indexRequestBuilders);
+
+ logger.info("--> searching explicitly on field1 and highlighting on it");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSize(COUNT)
+ .setQuery(termQuery("field1", "test"))
+ .addHighlightedField("field1")
+ .get();
+ assertHitCount(searchResponse, (long)COUNT);
+ assertThat(searchResponse.getHits().hits().length, equalTo(COUNT));
+ for (SearchHit hit : searchResponse.getHits()) {
+ String prefix = prefixes.get(hit.id());
+ assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " <em>test</em>."));
+ }
+
+ logger.info("--> searching explicitly on field1 and highlighting on it, with DFS");
+ searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setSize(COUNT)
+ .setQuery(termQuery("field1", "test"))
+ .addHighlightedField("field1")
+ .get();
+ assertHitCount(searchResponse, (long)COUNT);
+ assertThat(searchResponse.getHits().hits().length, equalTo(COUNT));
+ for (SearchHit hit : searchResponse.getHits()) {
+ String prefix = prefixes.get(hit.id());
+ assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " <em>test</em>."));
+ }
+ }
+
+ @Test //https://github.com/elasticsearch/elasticsearch/issues/4116
+ public void testPostingsHighlighterCustomIndexName() {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,index_options=offsets,index_name=my_field"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "First sentence. Second sentence.").get();
+ refresh();
+
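+ //highlighting should work whether we query by the field name or by its index_name, whichever name we ask to highlight on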
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("field1", "first")).addHighlightedField("field1").get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first")).addHighlightedField("field1").get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first")).addHighlightedField("my_field").get();
+ assertHighlight(searchResponse, 0, "my_field", 0, 1, equalTo("<em>First</em> sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first"))
+ .addHighlightedField("field1").setHighlighterRequireFieldMatch(true).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence."));
+ }
+
+ @Test
+ public void testFastVectorHighlighterCustomIndexName() {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,term_vector=with_positions_offsets,index_name=my_field"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "First sentence. Second sentence.").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("field1", "first")).addHighlightedField("field1").get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first")).addHighlightedField("field1").get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first")).addHighlightedField("my_field").get();
+ assertHighlight(searchResponse, 0, "my_field", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first"))
+ .addHighlightedField("field1").setHighlighterRequireFieldMatch(true).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+ }
+
+ @Test
+ public void testPlainHighlighterCustomIndexName() {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,index_name=my_field"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "First sentence. Second sentence.").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("field1", "first")).addHighlightedField("field1").get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first")).addHighlightedField("field1").get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first")).addHighlightedField("my_field").get();
+ assertHighlight(searchResponse, 0, "my_field", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("my_field", "first"))
+ .addHighlightedField("field1").setHighlighterRequireFieldMatch(true).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("<em>First</em> sentence. Second sentence."));
+ }
+
+ @Test
+ public void testFastVectorHighlighterPhraseBoost() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ phraseBoostTestCase("fvh");
+ }
+
+ @Test
+ public void testPostingsHighlighterPhraseBoost() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ phraseBoostTestCase("postings");
+ }
+
+ /**
+ * Test phrase boosting over normal term matches. Note that this will never pass with the plain highlighter
+ * because it doesn't support the concept of terms having a different weight based on position.
+ * @param highlighterType highlighter to test
+ */
+ private void phraseBoostTestCase(String highlighterType) {
+ ensureGreen();
+ StringBuilder text = new StringBuilder();
+ text.append("words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk\n");
+ for (int i = 0; i < 10; i++) {
+ text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk\n");
+ }
+ text.append("highlight words together\n");
+ for (int i = 0; i < 10; i++) {
+ text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk\n");
+ }
+ index("test", "type1", "1", "field1", text.toString());
+ refresh();
+
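+ //the exact phrase occurs only once, buried between junk sentences, so its fragment can only rank first when the phrase clause carries the boost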
+ // Match queries
+ phraseBoostTestCaseForClauses(highlighterType, 100f,
+ matchQuery("field1", "highlight words together"),
+ matchPhraseQuery("field1", "highlight words together"));
+
+ // Query string with a single field
+ phraseBoostTestCaseForClauses(highlighterType, 100f,
+ queryString("highlight words together").field("field1"),
+ queryString("\"highlight words together\"").field("field1").autoGeneratePhraseQueries(true));
+
+ // Query string with a single field without dismax
+ phraseBoostTestCaseForClauses(highlighterType, 100f,
+ queryString("highlight words together").field("field1").useDisMax(false),
+ queryString("\"highlight words together\"").field("field1").useDisMax(false).autoGeneratePhraseQueries(true));
+
+ // Query string with more than one field
+ phraseBoostTestCaseForClauses(highlighterType, 100f,
+ queryString("highlight words together").field("field1").field("field2"),
+ queryString("\"highlight words together\"").field("field1").field("field2").autoGeneratePhraseQueries(true));
+
+ // Query string boosting the field
+ phraseBoostTestCaseForClauses(highlighterType, 1f,
+ queryString("highlight words together").field("field1"),
+ queryString("\"highlight words together\"").field("field1^100").autoGeneratePhraseQueries(true));
+ }
+
+ private <P extends QueryBuilder & BoostableQueryBuilder> void
+ phraseBoostTestCaseForClauses(String highlighterType, float boost, QueryBuilder terms, P phrase) {
+ Matcher<String> highlightedMatcher = Matchers.<String>either(containsString("<em>highlight words together</em>")).or(
+ containsString("<em>highlight</em> <em>words</em> <em>together</em>"));
+ SearchRequestBuilder search = client().prepareSearch("test").setHighlighterRequireFieldMatch(true)
+ .setHighlighterOrder("score").setHighlighterType(highlighterType)
+ .addHighlightedField("field1", 100, 1);
+
+ // Try with a bool query
+ phrase.boost(boost);
+ SearchResponse response = search.setQuery(boolQuery().must(terms).should(phrase)).get();
+ assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher);
+ phrase.boost(1);
+ // Try with a boosting query
+ response = search.setQuery(boostingQuery().positive(phrase).negative(terms).boost(boost).negativeBoost(1)).get();
+ assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher);
+ // Try with a boosting query using a negative boost
+ response = search.setQuery(boostingQuery().positive(phrase).negative(terms).boost(1).negativeBoost(1/boost)).get();
+ assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchTests.java b/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchTests.java
new file mode 100644
index 0000000..50d4428
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.indicesboost;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleIndicesBoostSearchTests extends ElasticsearchIntegrationTest {
+
+ private static final Settings DEFAULT_SETTINGS = ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ @Test
+ public void testIndicesBoost() throws Exception {
+ ElasticsearchAssertions.assertHitCount(client().prepareSearch().setQuery(termQuery("test", "value")).get(), 0);
+
+ try {
+ client().prepareSearch("test").setQuery(termQuery("test", "value")).execute().actionGet();
+ fail("should fail");
+ } catch (Exception e) {
+ // expected, no indices exist yet
+ }
+
+ client().admin().indices().create(createIndexRequest("test1").settings(DEFAULT_SETTINGS)).actionGet();
+ client().admin().indices().create(createIndexRequest("test2").settings(DEFAULT_SETTINGS)).actionGet();
+ client().index(indexRequest("test1").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value check").endObject())).actionGet();
+ client().index(indexRequest("test2").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value beck").endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
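+ //a boost barely above 1.0 should be enough to flip the ranking of the two otherwise equally scoring docs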
+ float indexBoost = 1.1f;
+
+ logger.info("--- QUERY_THEN_FETCH");
+
+ logger.info("Query with test1 boosted");
+ SearchResponse response = client().search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource().explain(true).indexBoost("test1", indexBoost).query(termQuery("test", "value")))
+ ).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).index(), equalTo("test1"));
+ assertThat(response.getHits().getAt(1).index(), equalTo("test2"));
+
+ logger.info("Query with test2 boosted");
+ response = client().search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource().explain(true).indexBoost("test2", indexBoost).query(termQuery("test", "value")))
+ ).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).index(), equalTo("test2"));
+ assertThat(response.getHits().getAt(1).index(), equalTo("test1"));
+
+ logger.info("--- DFS_QUERY_THEN_FETCH");
+
+ logger.info("Query with test1 boosted");
+ response = client().search(searchRequest()
+ .searchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .source(searchSource().explain(true).indexBoost("test1", indexBoost).query(termQuery("test", "value")))
+ ).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).index(), equalTo("test1"));
+ assertThat(response.getHits().getAt(1).index(), equalTo("test2"));
+
+ logger.info("Query with test2 boosted");
+ response = client().search(searchRequest()
+ .searchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .source(searchSource().explain(true).indexBoost("test2", indexBoost).query(termQuery("test", "value")))
+ ).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).index(), equalTo("test2"));
+ assertThat(response.getHits().getAt(1).index(), equalTo("test1"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java b/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java
new file mode 100644
index 0000000..ba510ab
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.matchedqueries;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItemInArray;
+
+/**
+ *
+ */
+public class MatchedQueriesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleMatchedQueryFromFilteredQuery() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("name", "test1", "number", 1).get();
+ client().prepareIndex("test", "type1", "2").setSource("name", "test2", "number", 2).get();
+ client().prepareIndex("test", "type1", "3").setSource("name", "test3", "number", 3).get();
+ refresh();
+
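+ //each hit reports back the names of the queries/filters it matched via matchedQueries()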
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), orFilter(rangeFilter("number").lte(2).filterName("test1"), rangeFilter("number").gt(2).filterName("test2")))).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1") || hit.id().equals("2")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("test1"));
+ } else if (hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("test2"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+
+ searchResponse = client().prepareSearch()
+ .setQuery(boolQuery().should(rangeQuery("number").lte(2).queryName("test1")).should(rangeQuery("number").gt(2).queryName("test2"))).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1") || hit.id().equals("2")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("test1"));
+ } else if (hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("test2"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+
+ @Test
+ public void simpleMatchedQueryFromTopLevelFilter() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("name", "test", "title", "title1").get();
+ client().prepareIndex("test", "type1", "2").setSource("name", "test").get();
+ client().prepareIndex("test", "type1", "3").setSource("name", "test").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(orFilter(
+ termFilter("name", "test").filterName("name"),
+ termFilter("title", "title1").filterName("title"))).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title"));
+ } else if (hit.id().equals("2") || hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(queryFilter(boolQuery()
+ .should(termQuery("name", "test").queryName("name"))
+ .should(termQuery("title", "title1").queryName("title")))).get();
+
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title"));
+ } else if (hit.id().equals("2") || hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+
+ @Test
+ public void simpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("name", "test", "title", "title1").get();
+ client().prepareIndex("test", "type1", "2").setSource("name", "test", "title", "title2").get();
+ client().prepareIndex("test", "type1", "3").setSource("name", "test", "title", "title3").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("title", "title1", "title2", "title3").filterName("title")))
+ .setPostFilter(termFilter("name", "test").filterName("name")).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1") || hit.id().equals("2") || hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termsQuery("title", "title1", "title2", "title3").queryName("title"))
+ .setPostFilter(queryFilter(matchQuery("name", "test").queryName("name"))).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1") || hit.id().equals("2") || hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+
+ @Test
+ public void testIndicesFilterSupportsName() {
+ createIndex("test1", "test2");
+ ensureGreen();
+
+ client().prepareIndex("test1", "type1", "1").setSource("title", "title1").get();
+ client().prepareIndex("test2", "type1", "2").setSource("title", "title2").get();
+ client().prepareIndex("test2", "type1", "3").setSource("title", "title3").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(),
+ orFilter(
+ indicesFilter(termFilter("title", "title1").filterName("title1"), "test1")
+ .noMatchFilter(termFilter("title", "title2").filterName("title2")).filterName("indices_filter"),
+ termFilter("title", "title3").filterName("title3")).filterName("or"))).get();
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1")) {
+ assertThat(hit.matchedQueries().length, equalTo(3));
+ assertThat(hit.matchedQueries(), hasItemInArray("indices_filter"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title1"));
+ assertThat(hit.matchedQueries(), hasItemInArray("or"));
+ } else if (hit.id().equals("2")) {
+ assertThat(hit.matchedQueries().length, equalTo(3));
+ assertThat(hit.matchedQueries(), hasItemInArray("indices_filter"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title2"));
+ assertThat(hit.matchedQueries(), hasItemInArray("or"));
+ } else if (hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("title3"));
+ assertThat(hit.matchedQueries(), hasItemInArray("or"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+
+ /**
+ * Test case for issue #4361: https://github.com/elasticsearch/elasticsearch/issues/4361
+ */
+ @Test
+ public void testMatchedWithShould() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("content", "Lorem ipsum dolor sit amet").get();
+ client().prepareIndex("test", "type1", "2").setSource("content", "consectetur adipisicing elit").get();
+ refresh();
+
+ // Execute the search at least twice, to make sure the query gets loaded into the cache
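+ // the _primary preference below keeps every iteration on the same shards, so the cache is actually reused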
+ int iter = atLeast(2);
+ for (int i = 0; i < iter; i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(
+ boolQuery()
+ .minimumNumberShouldMatch(1)
+ .should(queryString("dolor").queryName("dolor"))
+ .should(queryString("elit").queryName("elit"))
+ )
+ .setPreference("_primary")
+ .get();
+
+ assertHitCount(searchResponse, 2l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("dolor"));
+ } else if (hit.id().equals("2")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("elit"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/msearch/SimpleMultiSearchTests.java b/src/test/java/org/elasticsearch/search/msearch/SimpleMultiSearchTests.java
new file mode 100644
index 0000000..dc3b1df
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/msearch/SimpleMultiSearchTests.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.msearch;
+
+import org.elasticsearch.action.search.MultiSearchResponse;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.equalTo;
+
+public class SimpleMultiSearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleMultiSearch() {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type", "1").setSource("field", "xxx").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "yyy").execute().actionGet();
+ refresh();
+ MultiSearchResponse response = client().prepareMultiSearch()
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx")))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy")))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+
+ for (MultiSearchResponse.Item item : response) {
+ assertNoFailures(item.getResponse());
+ }
+ assertThat(response.getResponses().length, equalTo(3));
+ assertHitCount(response.getResponses()[0].getResponse(), 1l);
+ assertHitCount(response.getResponses()[1].getResponse(), 1l);
+ assertHitCount(response.getResponses()[2].getResponse(), 2l);
+ assertFirstHit(response.getResponses()[0].getResponse(), hasId("1"));
+ assertFirstHit(response.getResponses()[1].getResponse(), hasId("2"));
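+
+        // For reference, this request corresponds roughly to the newline-delimited
+        // header/body pairs the REST _msearch endpoint expects, e.g.:
+        //   {"index" : "test"}
+        //   {"query" : {"term" : {"field" : "xxx"}}}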
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java b/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java
new file mode 100644
index 0000000..8a6c494
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.preference;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.*;
+
+public class SearchPreferenceTests extends ElasticsearchIntegrationTest {
+
+ @Test // see #2896
+ public void testStopOneNodePreferenceWithRedState() throws InterruptedException {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", cluster().size()+2).put("index.number_of_replicas", 0)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", ""+i).setSource("field1", "value1").execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ cluster().stopRandomNode();
+ client().admin().cluster().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).execute().actionGet();
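+        // With one node stopped and the cluster red, searches with these preference
+        // values are still expected to return OK, possibly with failed shards;
+        // unknown node names in _prefer_node are tolerated (unlike _only_node,
+        // see the last test in this class).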
+ String[] preferences = new String[] {"_primary", "_local", "_primary_first", "_only_local", "_prefer_node:somenode", "_prefer_node:server2"};
+ for (String pref : preferences) {
+ SearchResponse searchResponse = client().prepareSearch().setSearchType(SearchType.COUNT).setPreference(pref).execute().actionGet();
+            assertThat(searchResponse.status(), equalTo(RestStatus.OK));
+            assertThat(pref, searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
+            searchResponse = client().prepareSearch().setPreference(pref).execute().actionGet();
+            assertThat(searchResponse.status(), equalTo(RestStatus.OK));
+            assertThat(pref, searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
+ }
+ }
+
+ @Test
+ public void noPreferenceRandom() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ final Client client = cluster().smartClient();
+ SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).execute().actionGet();
+ String firstNodeId = searchResponse.getHits().getAt(0).shard().nodeId();
+ searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).execute().actionGet();
+ String secondNodeId = searchResponse.getHits().getAt(0).shard().nodeId();
+
+ assertThat(firstNodeId, not(equalTo(secondNodeId)));
+ }
+
+ @Test
+ public void simplePreferenceTests() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_local").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_local").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_primary").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_primary").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("1234").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("1234").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+    @Test(expected = ElasticsearchIllegalArgumentException.class)
+ public void testThatSpecifyingNonExistingNodesReturnsUsefulError() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareSearch().setQuery(matchAllQuery()).setPreference("_only_node:DOES-NOT-EXIST").execute().actionGet();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/query/MultiMatchQueryTests.java b/src/test/java/org/elasticsearch/search/query/MultiMatchQueryTests.java
new file mode 100644
index 0000000..3127d72
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/query/MultiMatchQueryTests.java
@@ -0,0 +1,375 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.query;
+
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+public class MultiMatchQueryTests extends ElasticsearchIntegrationTest {
+
+ @Before
+ public void init() throws Exception {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.perfect_match.type", "custom")
+ .put("index.analysis.analyzer.perfect_match.tokenizer", "keyword")
+ .put("index.analysis.analyzer.perfect_match.filter", "lowercase")
+ .put("index.analysis.analyzer.category.type", "custom")
+ .put("index.analysis.analyzer.category.tokenizer", "whitespace")
+ .put("index.analysis.analyzer.category.filter", "lowercase")
+ );
+ assertAcked(builder.addMapping("test", createMapping()));
+ ensureGreen();
+ int numDocs = atLeast(50);
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ builders.add(client().prepareIndex("test", "test", "theone").setSource(
+ "full_name", "Captain America",
+ "first_name", "Captain",
+ "last_name", "America",
+ "category", "marvel hero"));
+ builders.add(client().prepareIndex("test", "test", "theother").setSource(
+ "full_name", "marvel hero",
+ "first_name", "marvel",
+ "last_name", "hero",
+ "category", "bogus"));
+
+ builders.add(client().prepareIndex("test", "test", "ultimate1").setSource(
+ "full_name", "Alpha the Ultimate Mutant",
+ "first_name", "Alpha the",
+ "last_name", "Ultimate Mutant",
+ "category", "marvel hero"));
+ builders.add(client().prepareIndex("test", "test", "ultimate2").setSource(
+ "full_name", "Man the Ultimate Ninja",
+ "first_name", "Man the Ultimate",
+ "last_name", "Ninja",
+ "category", "marvel hero"));
+
+        builders.add(client().prepareIndex("test", "test", "anotherhero").setSource(
+                "full_name", "ultimate",
+                "first_name", "wolverine",
+                "last_name", "",
+                "category", "marvel hero"));
+ List<String> firstNames = new ArrayList<String>();
+ fill(firstNames, "Captain", between(15, 25));
+ fill(firstNames, "Ultimate", between(5, 10));
+ fillRandom(firstNames, between(3, 7));
+ List<String> lastNames = new ArrayList<String>();
+ fill(lastNames, "Captain", between(3, 7));
+ fillRandom(lastNames, between(30, 40));
+ for (int i = 0; i < numDocs; i++) {
+ String first = RandomPicks.randomFrom(getRandom(), firstNames);
+ String last = randomPickExcept(lastNames, first);
+ builders.add(client().prepareIndex("test", "test", "" + i).setSource(
+ "full_name", first + " " + last,
+ "first_name", first,
+ "last_name", last,
+ "category", randomBoolean() ? "marvel hero" : "bogus"));
+ }
+ indexRandom(true, builders);
+ }
+
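+    // The fixed docs above ("theone", "theother", the two "ultimate*" docs) are
+    // the intended best matches for the queries in this class; the randomly
+    // generated names only add scoring noise around them.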
+ @Test
+ public void testDefaults() throws ExecutionException, InterruptedException {
+ MatchQueryBuilder.Type type = randomBoolean() ? null : MatchQueryBuilder.Type.BOOLEAN;
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR)).get();
+ Set<String> topNIds = Sets.newHashSet("theone", "theother");
+ for (int i = 0; i < searchResponse.getHits().hits().length; i++) {
+ topNIds.remove(searchResponse.getHits().getAt(i).getId());
+            // A random doc may tie with the top hits on score, making the order
+            // non-deterministic (the doc id breaks ties), so only membership is checked.
+ }
+ assertThat(topNIds, empty());
+ assertThat(searchResponse.getHits().hits()[0].getScore(), equalTo(searchResponse.getHits().hits()[1].getScore()));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).useDisMax(false).type(type)).get();
+ assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother")));
+ assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThan(searchResponse.getHits().hits()[1].getScore()));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).type(type)).get();
+ assertFirstHit(searchResponse, hasId("theother"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.AND).type(type)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.AND).type(type)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
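+        // Note: by default multi_match combines per-field scores with dis_max;
+        // useDisMax(false) switches to a summed bool/should combination, which is
+        // why the second search above asserts a strictly greater top score.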
+ }
+
+ private XContentBuilder createMapping() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("test")
+ .startObject("properties")
+ .startObject("full_name")
+ .field("type", "string")
+ .field("copy_to", "full_name_phrase")
+ .field("analyzer", "perfect_match")
+ .endObject()
+ .startObject("category")
+ .field("type", "string")
+ .field("analyzer", "category")
+                    .field("index_options", "docs")
+ .endObject()
+ .startObject("first_name")
+ .field("type", "string")
+ .field("omit_norms", "true")
+ .field("copy_to", "first_name_phrase")
+                    .field("index_options", "docs")
+ .endObject()
+ .startObject("last_name")
+ .field("type", "string")
+ .field("omit_norms", "true")
+ .field("copy_to", "last_name_phrase")
+                    .field("index_options", "docs")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
+    @Test
+    public void testPhraseType() {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("Man the Ultimate", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase")
+ .operator(MatchQueryBuilder.Operator.OR).type(MatchQueryBuilder.Type.PHRASE)).get();
+ assertFirstHit(searchResponse, hasId("ultimate2"));
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("Captain", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase")
+ .operator(MatchQueryBuilder.Operator.OR).type(MatchQueryBuilder.Type.PHRASE)).get();
+ assertThat(searchResponse.getHits().getTotalHits(), greaterThan(1l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("the Ul", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase")
+ .operator(MatchQueryBuilder.Operator.OR).type(MatchQueryBuilder.Type.PHRASE_PREFIX)).get();
+ assertFirstHit(searchResponse, hasId("ultimate2"));
+ assertSecondHit(searchResponse, hasId("ultimate1"));
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test
+ public void testCutoffFreq() throws ExecutionException, InterruptedException {
+ final long numDocs = client().prepareCount("test")
+ .setQuery(matchAllQuery()).get().getCount();
+ MatchQueryBuilder.Type type = randomBoolean() ? null : MatchQueryBuilder.Type.BOOLEAN;
+ Float cutoffFrequency = randomBoolean() ? Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20);
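+        // cutoffFrequency splits the query terms into low- and high-frequency
+        // groups: only the low-frequency terms are required to match, so adding
+        // the cutoff is expected to shrink the result set (asserted further below).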
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).cutoffFrequency(cutoffFrequency)).get();
+ Set<String> topNIds = Sets.newHashSet("theone", "theother");
+ for (int i = 0; i < searchResponse.getHits().hits().length; i++) {
+ topNIds.remove(searchResponse.getHits().getAt(i).getId());
+            // A random doc may tie with the top hits on score, making the order
+            // non-deterministic (the doc id breaks ties), so only membership is checked.
+ }
+ assertThat(topNIds, empty());
+ assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThanOrEqualTo(searchResponse.getHits().hits()[1].getScore()));
+
+ cutoffFrequency = randomBoolean() ? Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20);
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).useDisMax(false).cutoffFrequency(cutoffFrequency).type(type)).get();
+ assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother")));
+ assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThan(searchResponse.getHits().hits()[1].getScore()));
+ long size = searchResponse.getHits().getTotalHits();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).useDisMax(false).type(type)).get();
+ assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother")));
+        assertThat("the query with a cutoff frequency is expected to produce a much smaller result set", size, lessThan(searchResponse.getHits().getTotalHits()));
+
+ cutoffFrequency = randomBoolean() ? Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20);
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).cutoffFrequency(cutoffFrequency).type(type)).get();
+ assertFirstHit(searchResponse, hasId("theother"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.AND).cutoffFrequency(cutoffFrequency).type(type)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.AND).cutoffFrequency(cutoffFrequency).type(type)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+ }
+
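+    // Sanity check: multi_match should score exactly like the equivalent
+    // hand-built dis_max or bool query over the same fields.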
+    @Test
+    public void testEquivalence() {
+ final int numDocs = (int) client().prepareCount("test")
+ .setQuery(matchAllQuery()).get().getCount();
+ int numIters = atLeast(5);
+ for (int i = 0; i < numIters; i++) {
+ {
+ MatchQueryBuilder.Type type = randomBoolean() ? null : MatchQueryBuilder.Type.BOOLEAN;
+ SearchResponse left = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).type(type)).get();
+
+ SearchResponse right = client().prepareSearch("test").setSize(numDocs)
+                        .setQuery(disMaxQuery()
+                                .add(matchQuery("full_name", "marvel hero captain america"))
+ .add(matchQuery("first_name", "marvel hero captain america"))
+ .add(matchQuery("last_name", "marvel hero captain america"))
+ .add(matchQuery("category", "marvel hero captain america"))
+ ).get();
+ assertEquivalent("marvel hero captain america", left, right);
+ }
+
+ {
+ MatchQueryBuilder.Type type = randomBoolean() ? null : MatchQueryBuilder.Type.BOOLEAN;
+ String minShouldMatch = randomBoolean() ? null : "" + between(0, 1);
+ MatchQueryBuilder.Operator op = randomBoolean() ? MatchQueryBuilder.Operator.AND : MatchQueryBuilder.Operator.OR;
+ SearchResponse left = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(op).useDisMax(false).minimumShouldMatch(minShouldMatch).type(type)).get();
+
+ SearchResponse right = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(boolQuery().minimumShouldMatch(minShouldMatch)
+ .should(randomBoolean() ? termQuery("full_name", "captain america") : matchQuery("full_name", "captain america").operator(op))
+ .should(matchQuery("first_name", "captain america").operator(op))
+ .should(matchQuery("last_name", "captain america").operator(op))
+ .should(matchQuery("category", "captain america").operator(op))
+ ).get();
+ assertEquivalent("captain america", left, right);
+ }
+
+ {
+ String minShouldMatch = randomBoolean() ? null : "" + between(0, 1);
+ MatchQueryBuilder.Operator op = randomBoolean() ? MatchQueryBuilder.Operator.AND : MatchQueryBuilder.Operator.OR;
+ SearchResponse left = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(multiMatchQuery("capta", "full_name", "first_name", "last_name", "category")
+ .type(MatchQueryBuilder.Type.PHRASE_PREFIX).useDisMax(false).minimumShouldMatch(minShouldMatch)).get();
+
+ SearchResponse right = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(boolQuery().minimumShouldMatch(minShouldMatch)
+ .should(matchPhrasePrefixQuery("full_name", "capta"))
+ .should(matchPhrasePrefixQuery("first_name", "capta").operator(op))
+ .should(matchPhrasePrefixQuery("last_name", "capta").operator(op))
+ .should(matchPhrasePrefixQuery("category", "capta").operator(op))
+ ).get();
+ assertEquivalent("capta", left, right);
+ }
+ {
+ String minShouldMatch = randomBoolean() ? null : "" + between(0, 1);
+ MatchQueryBuilder.Operator op = randomBoolean() ? MatchQueryBuilder.Operator.AND : MatchQueryBuilder.Operator.OR;
+ SearchResponse left = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .type(MatchQueryBuilder.Type.PHRASE).useDisMax(false).minimumShouldMatch(minShouldMatch)).get();
+
+ SearchResponse right = client().prepareSearch("test").setSize(numDocs)
+ .setQuery(boolQuery().minimumShouldMatch(minShouldMatch)
+ .should(matchPhraseQuery("full_name", "captain america"))
+ .should(matchPhraseQuery("first_name", "captain america").operator(op))
+ .should(matchPhraseQuery("last_name", "captain america").operator(op))
+ .should(matchPhraseQuery("category", "captain america").operator(op))
+ ).get();
+ assertEquivalent("captain america", left, right);
+ }
+ }
+ }
+
+    private static void assertEquivalent(String query, SearchResponse left, SearchResponse right) {
+ assertNoFailures(left);
+ assertNoFailures(right);
+ SearchHits leftHits = left.getHits();
+ SearchHits rightHits = right.getHits();
+ assertThat(leftHits.getTotalHits(), equalTo(rightHits.getTotalHits()));
+ assertThat(leftHits.getHits().length, equalTo(rightHits.getHits().length));
+ SearchHit[] hits = leftHits.getHits();
+ SearchHit[] rHits = rightHits.getHits();
+ for (int i = 0; i < hits.length; i++) {
+ assertThat("query: " + query + " hit: " + i, (double)hits[i].getScore(), closeTo(rHits[i].getScore(), 0.00001d));
+ }
+ for (int i = 0; i < hits.length; i++) {
+ if (hits[i].getScore() == hits[hits.length - 1].getScore()) {
+ return; // we need to cut off here since this is the tail of the queue and we might not have fetched enough docs
+ }
+ assertThat("query: " + query, hits[i].getId(), equalTo(rHits[i].getId()));
+ }
+ }
+
+ public static List<String> fill(List<String> list, String value, int times) {
+ for (int i = 0; i < times; i++) {
+ list.add(value);
+ }
+ return list;
+ }
+
+ public List<String> fillRandom(List<String> list, int times) {
+ for (int i = 0; i < times; i++) {
+ list.add(randomRealisticUnicodeOfCodepointLengthBetween(1, 5));
+ }
+ return list;
+ }
+
+ public <T> T randomPickExcept(List<T> fromList, T butNot) {
+ while (true) {
+ T t = RandomPicks.randomFrom(getRandom(), fromList);
+ if (t.equals(butNot)) {
+ continue;
+ }
+ return t;
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java b/src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java
new file mode 100644
index 0000000..073a63d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java
@@ -0,0 +1,2179 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.apache.lucene.util.English;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.index.query.CommonTermsQueryBuilder.Operator;
+import org.elasticsearch.index.query.MatchQueryBuilder.Type;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.facet.FacetBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+
+public class SimpleQueryTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testOmitNormsOnAll() throws ExecutionException, InterruptedException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_all").field("omit_norms", true).endObject()
+ .endObject().endObject()));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumps"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick brown"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "quick"));
+
+ assertHitCount(client().prepareSearch().setQuery(matchQuery("_all", "quick")).get(), 3l);
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("_all", "quick")).get();
+ SearchHit[] hits = searchResponse.getHits().hits();
+ assertThat(hits.length, equalTo(3));
+ assertThat(hits[0].score(), allOf(equalTo(hits[1].getScore()), equalTo(hits[2].getScore())));
+ cluster().wipeIndices("test");
+
+ assertAcked(client().admin().indices().prepareCreate("test"));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumps"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick brown"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "quick"));
+
+ assertHitCount(client().prepareSearch().setQuery(matchQuery("_all", "quick")).get(), 3l);
+ searchResponse = client().prepareSearch().setQuery(matchQuery("_all", "quick")).get();
+ hits = searchResponse.getHits().hits();
+ assertThat(hits.length, equalTo(3));
+ assertThat(hits[0].score(), allOf(greaterThan(hits[1].getScore()), greaterThan(hits[2].getScore())));
+    }
+
+    @Test // see #3952
+ public void testEmptyQueryString() throws ExecutionException, InterruptedException, IOException {
+ createIndex("test");
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumps"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick brown"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "quick"));
+ assertHitCount(client().prepareSearch().setQuery(queryString("quick")).get(), 3l);
+ assertHitCount(client().prepareSearch().setQuery(queryString("")).get(), 0l); // return no docs
+ }
+
+ @Test // see https://github.com/elasticsearch/elasticsearch/issues/3177
+ public void testIssue3177() {
+ assertAcked(prepareCreate("test").setSettings(settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, 1)));
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2").get();
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3").get();
+ ensureGreen();
+ waitForRelocation();
+ optimize();
+ refresh();
+ assertHitCount(
+ client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(
+ andFilter(
+ queryFilter(matchAllQuery()),
+ notFilter(andFilter(queryFilter(termQuery("field1", "value1")),
+ queryFilter(termQuery("field1", "value2")))))).get(),
+ 3l);
+ assertHitCount(
+ client().prepareSearch()
+ .setQuery(
+ filteredQuery(
+ boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2"))
+ .should(termQuery("field1", "value3")),
+ notFilter(andFilter(queryFilter(termQuery("field1", "value1")),
+ queryFilter(termQuery("field1", "value2")))))).get(),
+ 3l);
+ assertHitCount(
+ client().prepareSearch().setQuery(matchAllQuery()).setPostFilter(notFilter(termFilter("field1", "value3"))).get(),
+ 2l);
+ }
+
+ @Test
+ public void passQueryAsStringTest() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").setRefresh(true).get();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery("{ \"term\" : { \"field1\" : \"value1_1\" }}").get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void testIndexOptions() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,index_options=docs")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox").setRefresh(true).get();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get();
+ assertHitCount(searchResponse, 1l);
+ try {
+            client().prepareSearch().setQuery(matchQuery("field1", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get();
+            fail("SearchPhaseExecutionException should have been thrown");
+ } catch (SearchPhaseExecutionException e) {
+ assertTrue("wrong exception message " + e.getMessage(), e.getMessage().endsWith("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery (term=quick)]; }"));
+ }
+ }
+
+ @Test // see #3521
+ public void testConstantScoreQuery() throws Exception {
+ Random random = getRandom();
+ createIndex("test");
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox"), client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox"));
+ ensureYellow();
+ SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("field1", "quick"))).get();
+ assertHitCount(searchResponse, 2l);
+ for (SearchHit searchHit : searchResponse.getHits().hits()) {
+ assertSearchHit(searchHit, hasScore(1.0f));
+ }
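+        // constant_score gives every matching doc the same score (its boost),
+        // which is why each hit above scores exactly 1.0.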
+
+ searchResponse = client().prepareSearch("test").setQuery(
+ boolQuery().must(matchAllQuery()).must(
+ constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + getRandom().nextFloat()))).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).score()));
+
+        searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + getRandom().nextFloat())).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).score()));
+
+ searchResponse = client().prepareSearch("test").setQuery(
+ constantScoreQuery(boolQuery().must(matchAllQuery()).must(
+ constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + (random.nextBoolean()? 0.0f : random.nextFloat()))))).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).score()));
+ for (SearchHit searchHit : searchResponse.getHits().hits()) {
+ assertSearchHit(searchHit, hasScore(1.0f));
+ }
+
+ int num = atLeast(100);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[num];
+ for (int i = 0; i < builders.length; i++) {
+            builders[i] = client().prepareIndex("test_1", "type", "" + i).setSource("f", English.intToEnglish(i));
+ }
+ createIndex("test_1");
+ indexRandom(true, builders);
+ ensureYellow();
+ int queryRounds = atLeast(10);
+ for (int i = 0; i < queryRounds; i++) {
+ MatchQueryBuilder matchQuery = matchQuery("f", English.intToEnglish(between(0, num)));
+ searchResponse = client().prepareSearch("test_1").setQuery(matchQuery).setSize(num).get();
+ long totalHits = searchResponse.getHits().totalHits();
+ SearchHits hits = searchResponse.getHits();
+ for (SearchHit searchHit : hits) {
+ assertSearchHit(searchHit, hasScore(1.0f));
+ }
+ if (random.nextBoolean()) {
+ searchResponse = client().prepareSearch("test_1").setQuery(
+ boolQuery().must(matchAllQuery()).must(
+ constantScoreQuery(matchQuery).boost(1.0f + (random.nextBoolean()? 0.0f : random.nextFloat())))).setSize(num).get();
+ hits = searchResponse.getHits();
+ } else {
+ FilterBuilder filter = queryFilter(matchQuery);
+ searchResponse = client().prepareSearch("test_1").setQuery(
+ boolQuery().must(matchAllQuery()).must(
+ constantScoreQuery(filter).boost(1.0f + (random.nextBoolean()? 0.0f : random.nextFloat())))).setSize(num).get();
+ hits = searchResponse.getHits();
+ }
+ assertThat(hits.totalHits(), equalTo(totalHits));
+ if (totalHits > 1) {
+ float expected = hits.getAt(0).score();
+ for (SearchHit searchHit : hits) {
+ assertSearchHit(searchHit, hasScore(expected));
+ }
+ }
+ }
+ }
+
+ @Test // see #3521
+ public void testAllDocsQueryString() throws InterruptedException, ExecutionException {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_REPLICAS, 0));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("foo", "bar"),
+ client().prepareIndex("test", "type1", "2").setSource("foo", "bar")
+ );
+ int iters = atLeast(100);
+ for (int i = 0; i < iters; i++) {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(queryString("*:*^10.0").boost(10.0f)).get();
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch("test").setQuery(
+ boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchAllQuery()))).get();
+ assertHitCount(searchResponse, 2l);
+            assertThat((double) searchResponse.getHits().getAt(0).score(), closeTo(Math.sqrt(2), 0.1));
+            assertThat((double) searchResponse.getHits().getAt(1).score(), closeTo(Math.sqrt(2), 0.1));
+ }
+ }
+
+ @Test
+ public void testCommonTermsQueryOnAllField() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "message", "type=string", "comment", "type=string,boost=5.0")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1).get();
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("message", "test message", "comment", "whatever"),
+ client().prepareIndex("test", "type1", "2").setSource("message", "hello world", "comment", "test comment"));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(commonTerms("_all", "test")).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("2"));
+ assertSecondHit(searchResponse, hasId("1"));
+ assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore()));
+ }
+
+ @Test
+ public void testCommonTermsQuery() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,analyzer=whitespace")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1).get();
+ indexRandom(true, client().prepareIndex("test", "type1", "3").setSource("field1", "quick lazy huge brown pidgin", "field2", "the quick lazy huge brown fox jumps over the tree"),
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree") );
+
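+        // commonTerms splits terms into high- and low-frequency groups around the
+        // cutoff; low-frequency terms are combined with lowFreqOperator, so with
+        // AND every doc must contain "the" and doc 3 ("...pidgin") drops out.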
+ SearchResponse searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the quick brown").cutoffFrequency(3).lowFreqOperator(Operator.OR)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the quick brown").cutoffFrequency(3).lowFreqOperator(Operator.AND)).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ // Default
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the quick brown").cutoffFrequency(3)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the huge fox").lowFreqMinimumShouldMatch("2")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the lazy fox brown").cutoffFrequency(1).highFreqMinimumShouldMatch("3")).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the lazy fox brown").cutoffFrequency(1).highFreqMinimumShouldMatch("4")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery("{ \"common\" : { \"field1\" : { \"query\" : \"the lazy fox brown\", \"cutoff_frequency\" : 1, \"minimum_should_match\" : { \"high_freq\" : 4 } } } }").get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ // Default
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the lazy fox brown").cutoffFrequency(1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTerms("field1", "the quick brown").cutoffFrequency(3).analyzer("stop")).get();
+ assertHitCount(searchResponse, 3l);
+        // the "stop" analyzer drops "the" since it's a stopword
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("3"));
+ assertThirdHit(searchResponse, hasId("2"));
+
+ // try the same with match query
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.OR)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND).analyzer("stop")).get();
+ assertHitCount(searchResponse, 3l);
+        // the "stop" analyzer drops "the" since it's a stopword
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("3"));
+ assertThirdHit(searchResponse, hasId("2"));
+
+ // try the same with multi match query
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery("the quick brown", "field1", "field2").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("3")); // better score due to different query stats
+ assertSecondHit(searchResponse, hasId("1"));
+ assertThirdHit(searchResponse, hasId("2"));
+ }
+
+ @Test
+ public void testOmitTermFreqsAndPositions() throws Exception {
+ Version version = Version.CURRENT;
+ int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+ try {
+ // backwards compat test!
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,omit_term_freq_and_positions=true")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_VERSION_CREATED, version.id));
+ assertThat(version.onOrAfter(Version.V_1_0_0_RC2), equalTo(false));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox"));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get();
+ assertHitCount(searchResponse, 1l);
+ try {
+ client().prepareSearch().setQuery(matchQuery("field1", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get();
+ fail("SearchPhaseExecutionException should have been thrown");
+ } catch (SearchPhaseExecutionException e) {
+ assertTrue(e.getMessage().endsWith("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery (term=quick)]; }"));
+ }
+ cluster().wipeIndices("test");
+ } catch (MapperParsingException ex) {
+ assertThat(version.toString(), version.onOrAfter(Version.V_1_0_0_RC2), equalTo(true));
+ assertThat(ex.getCause().getMessage(), equalTo("'omit_term_freq_and_positions' is not supported anymore - use ['index_options' : 'DOCS_ONLY'] instead"));
+ }
+ version = randomVersion();
+ }
+ }
+
+ @Test
+ public void queryStringAnalyzedWildcard() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryString("value*").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("*ue*").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("*ue_1").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("val*e_1").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("v?l*e?1").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void testLowercaseExpandedTerms() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get();
+ refresh();
+
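+        // lowercaseExpandedTerms (default true) lowercases terms produced by fuzzy,
+        // wildcard and range expansion before they hit the index; disabling it makes
+        // the uppercase variants below miss the lowercased indexed terms.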
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryString("VALUE_3~1").lowercaseExpandedTerms(true)).get();
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch().setQuery(queryString("VALUE_3~1").lowercaseExpandedTerms(false)).get();
+ assertHitCount(searchResponse, 0l);
+ searchResponse = client().prepareSearch().setQuery(queryString("ValUE_*").lowercaseExpandedTerms(true)).get();
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch().setQuery(queryString("vAl*E_1")).get();
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch().setQuery(queryString("[VALUE_1 TO VALUE_3]")).get();
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch().setQuery(queryString("[VALUE_1 TO VALUE_3]").lowercaseExpandedTerms(false)).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ public void testDateRangeInQueryString() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));
+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1));
+ client().prepareIndex("test", "type", "1").setSource("past", aMonthAgo, "future", aMonthFromNow).get();
+ refresh();
+
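+        // query_string date math: "now-2M/d" is two months ago rounded down to the
+        // day; unit letters are case-sensitive (lowercase d = days), which is what
+        // makes the uppercase "now/D" below fail.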
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryString("past:[now-2M/d TO now/d]")).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("future:[now/d TO now+2M/d]").lowercaseExpandedTerms(false)).get();
+ assertHitCount(searchResponse, 1l);
+
+ try {
+ client().prepareSearch().setQuery(queryString("future:[now/D TO now+2M/d]").lowercaseExpandedTerms(false)).get();
+ fail("D is an unsupported unit in date math");
+ } catch (Exception e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void typeFilterTypeIndexedTests() throws Exception {
+ typeFilterTests("not_analyzed");
+ }
+
+ @Test
+ public void typeFilterTypeNotIndexedTests() throws Exception {
+ typeFilterTests("no");
+ }
+
+ private void typeFilterTests(String index) throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_type").field("index", index).endObject()
+ .endObject().endObject())
+ .addMapping("type2", jsonBuilder().startObject().startObject("type2")
+ .startObject("_type").field("index", index).endObject()
+ .endObject().endObject()));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "3").setSource("field1", "value1"));
+
+ assertHitCount(client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), typeFilter("type1"))).get(), 2l);
+ assertHitCount(client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), typeFilter("type2"))).get(), 3l);
+
+ assertHitCount(client().prepareSearch().setTypes("type1").setQuery(matchAllQuery()).get(), 2l);
+ assertHitCount(client().prepareSearch().setTypes("type2").setQuery(matchAllQuery()).get(), 3l);
+
+ assertHitCount(client().prepareSearch().setTypes("type1", "type2").setQuery(matchAllQuery()).get(), 5l);
+ }
+
+ @Test
+ public void idsFilterTestsIdIndexed() throws Exception {
+ idsFilterTests("not_analyzed");
+ }
+
+ @Test
+ public void idsFilterTestsIdNotIndexed() throws Exception {
+ idsFilterTests("no");
+ }
+
+ private void idsFilterTests(String index) throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_id").field("index", index).endObject()
+ .endObject().endObject()));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3"));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter("type1").ids("1", "3"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ // no type
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter().ids("1", "3"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery("type1").ids("1", "3")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ // no type
+ searchResponse = client().prepareSearch().setQuery(idsQuery().ids("1", "3")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery("type1").ids("7", "10")).get();
+ assertHitCount(searchResponse, 0l);
+
+ // repeat..., with terms
+ searchResponse = client().prepareSearch().setTypes("type1").setQuery(constantScoreQuery(termsFilter("_id", "1", "3"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+ }
+
+ @Test
+ public void testLimitFilter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1_2"),
+ client().prepareIndex("test", "type1", "3").setSource("field2", "value2_3"),
+ client().prepareIndex("test", "type1", "4").setSource("field3", "value3_4"));
+
+ assertHitCount(client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), limitFilter(2))).get(), 2l);
+ }
+
+ @Test
+ public void filterExistsMissingTests() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x1", "x_1").field("field1", "value1_1").field("field2", "value2_1").endObject()),
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x2", "x_2").field("field1", "value1_2").endObject()),
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y1", "y_1").field("field2", "value2_3").endObject()),
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y2", "y_2").field("field3", "value3_4").endObject()) );
+
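+        // exists/missing filters also accept field name patterns ("x*") and object
+        // prefixes ("obj1"), both exercised below.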
+ SearchResponse searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(existsFilter("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ searchResponse = client().prepareSearch().setQuery(queryString("_exists_:field1")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("field2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("field3"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("4"));
+
+ // wildcard check
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("x*"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ // object check
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("obj1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), missingFilter("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(missingFilter("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ searchResponse = client().prepareSearch().setQuery(queryString("_missing_:field1")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ // wildcard check
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), missingFilter("x*"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ // object check
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), missingFilter("obj1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+ }
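+
+ // For reference, the exists/missing builders above should serialize to roughly
+ // this query DSL shape (a sketch of the 1.x wire format, not an exact dump):
+ // { "filtered": { "query": { "match_all": {} },
+ //                 "filter": { "exists": { "field": "field1" } } } }
+ // missingFilter(...) swaps "exists" for "missing", the field name may be a
+ // wildcard pattern like "x*", and "_exists_:" / "_missing_:" are the
+ // query_string shorthands exercised above.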
+
+ @Test
+ public void passQueryOrFilterAsJSONStringTest() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").setRefresh(true).get();
+
+ WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }");
+ assertHitCount(client().prepareSearch().setQuery(wrapper).get(), 1l);
+
+ BoolQueryBuilder bool = boolQuery().must(wrapper).must(new TermQueryBuilder("field2", "value2_1"));
+ assertHitCount(client().prepareSearch().setQuery(bool).get(), 1l);
+
+ WrapperFilterBuilder wrapperFilter = new WrapperFilterBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }");
+ assertHitCount(client().prepareSearch().setPostFilter(wrapperFilter).get(), 1l);
+ }
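+
+ // WrapperQueryBuilder carries the raw JSON through as an opaque payload; on
+ // the wire it should come out roughly as (sketch - the embedded query is
+ // base64-encoded when serialized):
+ // { "wrapper": { "query": "<base64 of the embedded term query>" } }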
+
+ @Test
+ public void testFiltersWithCustomCacheKey() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get();
+ assertHitCount(searchResponse, 1l);
+ }
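+
+ // cacheKey("test1") should map to the filter's "_cache_key" parameter,
+ // roughly (sketch of the 1.x DSL):
+ // { "terms": { "field1": ["value1"], "_cache_key": "test1" } }
+ // which is why the two tagged searches above can share one cache entry while
+ // the untagged ones fall back to the auto-generated key.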
+
+ @Test
+ public void testMatchQueryNumeric() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("long", 1l, "double", 1.0d),
+ client().prepareIndex("test", "type1", "2").setSource("long", 2l, "double", 2.0d),
+ client().prepareIndex("test", "type1", "3").setSource("long", 3l, "double", 3.0d));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("long", "1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ searchResponse = client().prepareSearch().setQuery(matchQuery("double", "2")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+ try {
+ client().prepareSearch().setQuery(matchQuery("double", "2 3 4")).get();
+ fail("SearchPhaseExecutionException should have been thrown");
+ } catch (SearchPhaseExecutionException ex) {
+ // expected: "2 3 4" cannot be parsed as a single numeric value, and the
+ // resulting NumberFormatException surfaces as a shard failure
+ }
+ }
+
+ @Test
+ public void testMultiMatchQuery() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value4", "field3", "value3"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2", "field2", "value5", "field3", "value2"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3", "field2", "value6", "field3", "value1") );
+
+ MultiMatchQueryBuilder builder = multiMatchQuery("value1 value2 value4", "field1", "field2");
+ SearchResponse searchResponse = client().prepareSearch().setQuery(builder)
+ .addFacet(FacetBuilders.termsFacet("field1").field("field1")).get();
+
+ assertHitCount(searchResponse, 2l);
+ // this uses dismax so scores are equal and the order can be arbitrary
+ assertSearchHits(searchResponse, "1", "2");
+
+ builder.useDisMax(false);
+ searchResponse = client().prepareSearch()
+ .setQuery(builder)
+ .get();
+
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ client().admin().indices().prepareRefresh("test").get();
+ builder = multiMatchQuery("value1", "field1", "field2")
+ .operator(MatchQueryBuilder.Operator.AND); // Operator only applies on terms inside a field! Fields are always OR-ed together.
+ searchResponse = client().prepareSearch()
+ .setQuery(builder)
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ refresh();
+ builder = multiMatchQuery("value1", "field1", "field3^1.5")
+ .operator(MatchQueryBuilder.Operator.AND); // Operator only applies on terms inside a field! Fields are always OR-ed together.
+ searchResponse = client().prepareSearch().setQuery(builder).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "1");
+
+ client().admin().indices().prepareRefresh("test").get();
+ builder = multiMatchQuery("value1").field("field1").field("field3", 1.5f)
+ .operator(MatchQueryBuilder.Operator.AND); // Operator only applies on terms inside a field! Fields are always OR-ed together.
+ searchResponse = client().prepareSearch().setQuery(builder).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "1");
+
+ // Test lenient
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value7", "field2", "value8", "field4", 5).get();
+ refresh();
+
+ builder = multiMatchQuery("value1", "field1", "field2", "field4");
+ try {
+ client().prepareSearch().setQuery(builder).get();
+ fail("Exception expected");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
+ builder.lenient(true);
+ searchResponse = client().prepareSearch().setQuery(builder).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ public void testMatchQueryZeroTermsQuery() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", "field1", "type=string,analyzer=classic", "field2", "type=string,analyzer=classic"));
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2").get();
+ refresh();
+
+ BoolQueryBuilder boolQuery = boolQuery()
+ .must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE))
+ .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE));
+ SearchResponse searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 0l);
+
+ boolQuery = boolQuery()
+ .must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL))
+ .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 1l);
+
+ boolQuery = boolQuery().must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 2l);
+ }
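+
+ // The classic analyzer drops the stopword "a", leaving zero terms;
+ // zero_terms_query decides what such a clause becomes. Roughly (sketch):
+ // { "match": { "field1": { "query": "a", "zero_terms_query": "all" } } }
+ // "none" (the default) yields a match-nothing clause, "all" a match_all.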
+
+ public void testMultiMatchQueryZeroTermsQuery() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", "field1", "type=string,analyzer=classic", "field2", "type=string,analyzer=classic"));
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value3", "field2", "value4").get();
+ refresh();
+
+ BoolQueryBuilder boolQuery = boolQuery()
+ .must(multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE))
+ .must(multiMatchQuery("value1", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE)); // Fields are ORed together
+ SearchResponse searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 0l);
+
+ boolQuery = boolQuery()
+ .must(multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL))
+ .must(multiMatchQuery("value4", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 1l);
+
+ boolQuery = boolQuery().must(multiMatchQuery("a", "field1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test
+ public void testMultiMatchQueryMinShouldMatch() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("field1", new String[]{"value1", "value2", "value3"}).get();
+ client().prepareIndex("test", "type1", "2").setSource("field2", "value1").get();
+ refresh();
+
+ MultiMatchQueryBuilder multiMatchQuery = multiMatchQuery("value1 value2 foo", "field1", "field2");
+
+ multiMatchQuery.useDisMax(true);
+ multiMatchQuery.minimumShouldMatch("70%");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(multiMatchQuery)
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ multiMatchQuery.minimumShouldMatch("30%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ multiMatchQuery.useDisMax(false);
+ multiMatchQuery.minimumShouldMatch("70%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ multiMatchQuery.minimumShouldMatch("30%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ multiMatchQuery = multiMatchQuery("value1 value2 bar", "field1");
+ multiMatchQuery.minimumShouldMatch("100%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 0l);
+
+ multiMatchQuery.minimumShouldMatch("70%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
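+
+ // minimumShouldMatch is applied per field clause of the multi_match; roughly
+ // (sketch of the 1.x DSL):
+ // { "multi_match": { "query": "value1 value2 foo",
+ //                    "fields": ["field1", "field2"],
+ //                    "use_dis_max": true,
+ //                    "minimum_should_match": "70%" } }
+ // With three query terms, "70%" rounds down to two required matches and
+ // "30%" to one, which is why doc 2 (a single matching term) only appears
+ // at the lower setting.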
+
+ @Test
+ public void testFuzzyQueryString() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get();
+ client().prepareIndex("test", "type1", "2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryString("str:kimcy~1")).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ searchResponse = client().prepareSearch().setQuery(queryString("num:11~1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ searchResponse = client().prepareSearch().setQuery(queryString("date:2012-02-02~1d")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
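+
+ // "~" is type-aware in the 1.x query_string parser: on a string field
+ // "kimcy~1" is an edit-distance-1 fuzzy term, on a numeric field "11~1"
+ // should expand to roughly the range [10, 12], and on a date field
+ // "2012-02-02~1d" to a range of +/- one day around the given date.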
+
+ @Test
+ public void testQuotedQueryStringWithBoost() throws InterruptedException, ExecutionException {
+ float boost = 10.0f;
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("important", "phrase match", "less_important", "nothing important"),
+ client().prepareIndex("test", "type1", "2").setSource("important", "nothing important", "less_important", "phrase match")
+ );
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(queryString("\"phrase match\"").field("important", boost).field("less_important")).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThat((double)searchResponse.getHits().getAt(0).score(), closeTo(boost * searchResponse.getHits().getAt(1).score(), .1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(queryString("\"phrase match\"").field("important", boost).field("less_important").useDisMax(false)).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThat((double)searchResponse.getHits().getAt(0).score(), closeTo(boost * searchResponse.getHits().getAt(1).score(), .1));
+ }
+
+ @Test
+ public void testSpecialRangeSyntaxInQueryString() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get();
+ client().prepareIndex("test", "type1", "2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryString("num:>19")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(queryString("num:>20")).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("num:>=20")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(queryString("num:>11")).get();
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("num:<20")).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("num:<=20")).get();
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch().setQuery(queryString("+num:>11 +num:<20")).get();
+ assertHitCount(searchResponse, 1l);
+ }
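+
+ // The >, >=, < and <= shorthands are parsed into range queries; "num:>19"
+ // should be roughly equivalent to (sketch):
+ // { "range": { "num": { "gt": 19 } } }
+ // with the other operators mapping to "gte", "lt" and "lte".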
+
+ @Test
+ public void testEmptyTermsFilter() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type", "term", "type=string"));
+ ensureGreen();
+ indexRandom(true, client().prepareIndex("test", "type", "1").setSource("term", "1"),
+ client().prepareIndex("test", "type", "2").setSource("term", "2"),
+ client().prepareIndex("test", "type", "3").setSource("term", "3"),
+ client().prepareIndex("test", "type", "4").setSource("term", "4") );
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("term", new String[0]))).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), idsFilter())).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ public void testFieldDataTermsFilter() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type", "str", "type=string", "lng", "type=long", "dbl", "type=double"));
+ ensureGreen();
+ client().prepareIndex("test", "type", "1").setSource("str", "1", "lng", 1l, "dbl", 1.0d).get();
+ client().prepareIndex("test", "type", "2").setSource("str", "2", "lng", 2l, "dbl", 2.0d).get();
+ client().prepareIndex("test", "type", "3").setSource("str", "3", "lng", 3l, "dbl", 3.0d).get();
+ client().prepareIndex("test", "type", "4").setSource("str", "4", "lng", 4l, "dbl", 4.0d).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("str", "1", "4").execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "4");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("lng", new long[] {2, 3}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "3");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("dbl", new double[]{2, 3}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "3");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("lng", new int[] {1, 3}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("dbl", new float[] {2, 4}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "4");
+
+ // test partial matching
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("str", "2", "5").execution("fielddata"))).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("dbl", new double[] {2, 5}).execution("fielddata"))).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("lng", new long[] {2, 5}).execution("fielddata"))).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ // test valid type, but no matching terms
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("str", "5", "6").execution("fielddata"))).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("dbl", new double[] {5, 6}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsFilter("lng", new long[] {5, 6}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 0l);
+ }
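+
+ // execution("fielddata") selects the fielddata-backed variant of the terms
+ // filter; in the DSL this is roughly (sketch):
+ // { "terms": { "lng": [2, 3], "execution": "fielddata" } }
+ // The int/long/float/double array overloads above only change the Java-side
+ // input type - the values are compared against the field's mapped numeric type.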
+
+ @Test
+ public void testTermsLookupFilter() throws Exception {
+ assertAcked(prepareCreate("lookup").addMapping("type", "terms","type=string", "other", "type=string"));
+ assertAcked(prepareCreate("lookup2").addMapping("type",
+ jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("arr").startObject("properties").startObject("term").field("type", "string")
+ .endObject().endObject().endObject().endObject().endObject().endObject()));
+ assertAcked(prepareCreate("test").addMapping("type", "term", "type=string"));
+
+ ensureGreen();
+
+ indexRandom(true,
+ client().prepareIndex("lookup", "type", "1").setSource("terms", new String[]{"1", "3"}),
+ client().prepareIndex("lookup", "type", "2").setSource("terms", new String[]{"2"}),
+ client().prepareIndex("lookup", "type", "3").setSource("terms", new String[]{"2", "4"}),
+ client().prepareIndex("lookup", "type", "4").setSource("other", "value"),
+ client().prepareIndex("lookup2", "type", "1").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "1").endObject()
+ .startObject().field("term", "3").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("lookup2", "type", "2").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "2").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("lookup2", "type", "3").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "2").endObject()
+ .startObject().field("term", "4").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("test", "type", "1").setSource("term", "1"),
+ client().prepareIndex("test", "type", "2").setSource("term", "2"),
+ client().prepareIndex("test", "type", "3").setSource("term", "3"),
+ client().prepareIndex("test", "type", "4").setSource("term", "4") );
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ // same as above, just on the _id...
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("_id").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ // another search with the same parameters...
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("2").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("3").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "4");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup").lookupType("type").lookupId("4").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup2").lookupType("type").lookupId("1").lookupPath("arr.term"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup2").lookupType("type").lookupId("2").lookupPath("arr.term"))
+ ).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("term").lookupIndex("lookup2").lookupType("type").lookupId("3").lookupPath("arr.term"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "4");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupFilter("not_exists").lookupIndex("lookup2").lookupType("type").lookupId("3").lookupPath("arr.term"))
+ ).get();
+ assertHitCount(searchResponse, 0l);
+ }
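+
+ // termsLookupFilter fetches the term list from another document at search
+ // time; the DSL shape is roughly (sketch):
+ // { "terms": { "term": { "index": "lookup", "type": "type",
+ //                        "id": "1", "path": "terms" } } }
+ // lookupPath("arr.term") above shows that the path may also point into an
+ // array of objects.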
+
+ @Test
+ public void testBasicFilterById() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPostFilter(idsFilter("type1").ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter("type1", "type2").ids("1", "2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPostFilter(idsFilter().ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPostFilter(idsFilter().ids("1", "2")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter().ids("1", "2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter("type1").ids("1", "2"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter().ids("1"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter(null).ids("1"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsFilter("type1", "type2", "type3").ids("1", "2", "3", "4"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ }
+
+ @Test
+ public void testBasicQueryById() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2").ids("1", "2")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery().ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery().ids("1", "2")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery("type1").ids("1", "2")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery().ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery(null).ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2", "type3").ids("1", "2", "3", "4")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ }
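+
+ // idsQuery and idsFilter share the same DSL shape, roughly (sketch):
+ // { "ids": { "type": "type1", "values": ["1", "2"] } }
+ // Omitting the type (or passing null) matches the given ids across all
+ // types, which is what the untyped variants above rely on.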
+
+ @Test
+ public void testNumericTermsAndRanges() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1",
+ "num_byte", "type=byte", "num_short", "type=short",
+ "num_integer", "type=integer", "num_long", "type=long",
+ "num_float", "type=float", "num_double", "type=double"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("num_byte", 1, "num_short", 1, "num_integer", 1,
+ "num_long", 1, "num_float", 1, "num_double", 1).get();
+
+ client().prepareIndex("test", "type1", "2").setSource("num_byte", 2, "num_short", 2, "num_integer", 2,
+ "num_long", 2, "num_float", 2, "num_double", 2).get();
+
+ client().prepareIndex("test", "type1", "17").setSource("num_byte", 17, "num_short", 17, "num_integer", 17,
+ "num_long", 17, "num_float", 17, "num_double", 17).get();
+ refresh();
+
+ SearchResponse searchResponse;
+ logger.info("--> term query on 1");
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_byte", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_short", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_integer", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_long", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_float", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_double", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ logger.info("--> terms query on 1");
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_byte", new int[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_short", new int[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_integer", new int[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_long", new int[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_float", new double[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_double", new double[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ logger.info("--> term filter on 1");
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_byte", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_short", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_integer", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_long", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_float", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termFilter("num_double", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ logger.info("--> terms filter on 1");
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_byte", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_short", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_integer", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_long", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_float", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsFilter("num_double", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ public void testNumericRangeFilter_2826() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)
+ .addMapping("type1",
+ "num_byte", "type=byte", "num_short", "type=short",
+ "num_integer", "type=integer", "num_long", "type=long",
+ "num_float", "type=float", "num_double", "type=double"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "test1", "num_long", 1).get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "test1", "num_long", 2).get();
+ client().prepareIndex("test", "type1", "3").setSource("field1", "test2", "num_long", 3).get();
+ client().prepareIndex("test", "type1", "4").setSource("field1", "test2", "num_long", 4).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setPostFilter(
+ boolFilter()
+ .should(rangeFilter("num_long", 1, 2))
+ .should(rangeFilter("num_long", 3, 4))
+ ).get();
+ assertHitCount(searchResponse, 4l);
+
+ // This made #2826 fail! (only with bit-based filters)
+ searchResponse = client().prepareSearch("test").setPostFilter(
+ boolFilter()
+ .should(rangeFilter("num_long", 1, 2))
+ .should(rangeFilter("num_long", 3, 4))
+ ).get();
+ assertHitCount(searchResponse, 4l);
+
+ // This made #2979 fail!
+ searchResponse = client().prepareSearch("test").setPostFilter(
+ boolFilter()
+ .must(termFilter("field1", "test1"))
+ .should(rangeFilter("num_long", 1, 2))
+ .should(rangeFilter("num_long", 3, 4))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test
+ public void testEmptyTopLevelFilter() {
+ client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).get();
+ SearchResponse searchResponse = client().prepareSearch().setPostFilter("{}").get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test // see #2926
+ public void testMustNot() throws ElasticsearchException, IOException, ExecutionException, InterruptedException {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 2, SETTING_NUMBER_OF_REPLICAS, 0));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar"),
+ client().prepareIndex("test", "test", "2").setSource("description", "foo other anything"),
+ client().prepareIndex("test", "test", "3").setSource("description", "foo other"),
+ client().prepareIndex("test", "test", "4").setSource("description", "foo"));
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH).get();
+ assertHitCount(searchResponse, 4l);
+
+ searchResponse = client().prepareSearch("test").setQuery(
+ boolQuery()
+ .mustNot(matchQuery("description", "anything").type(Type.BOOLEAN))
+ ).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).get();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test // see #2994
+ public void testSimpleSpan() throws ElasticsearchException, IOException, ExecutionException, InterruptedException {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar"),
+ client().prepareIndex("test", "test", "2").setSource("description", "foo other anything"),
+ client().prepareIndex("test", "test", "3").setSource("description", "foo other"),
+ client().prepareIndex("test", "test", "4").setSource("description", "foo"));
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanTermQuery("description", "bar"))).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanTermQuery("test.description", "bar"))).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test").setQuery(
+ spanNearQuery()
+ .clause(spanTermQuery("description", "foo"))
+ .clause(spanTermQuery("test.description", "other"))
+ .slop(3)).get();
+ assertHitCount(searchResponse, 3l);
+ }
+
+ @Test
+ public void testSpanMultiTermQuery() throws ElasticsearchException, IOException {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar", "count", 1).get();
+ client().prepareIndex("test", "test", "2").setSource("description", "foo other anything", "count", 2).get();
+ client().prepareIndex("test", "test", "3").setSource("description", "foo other", "count", 3).get();
+ client().prepareIndex("test", "test", "4").setSource("description", "fop", "count", 4).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(fuzzyQuery("description", "fop")))).get();
+ assertHitCount(response, 4);
+
+ response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(prefixQuery("description", "fo")))).get();
+ assertHitCount(response, 4);
+
+ response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(wildcardQuery("description", "oth*")))).get();
+ assertHitCount(response, 3);
+
+ response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(rangeQuery("description").from("ffa").to("foo"))))
+ .execute().actionGet();
+ assertHitCount(response, 3);
+
+ response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(regexpQuery("description", "fo{2}")))).get();
+ assertHitCount(response, 3);
+ }
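+
+ // spanMultiTermQueryBuilder lets a multi-term query (fuzzy, prefix, wildcard,
+ // range, regexp) participate in span queries; the last search above should
+ // serialize roughly as (sketch):
+ // { "span_or": { "clauses": [
+ //     { "span_multi": { "match": { "regexp": { "description": "fo{2}" } } } } ] } }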
+
+ @Test
+ public void testSimpleDFSQuery() throws ElasticsearchException, IOException {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 5, SETTING_NUMBER_OF_REPLICAS, 0)
+ .addMapping("s", jsonBuilder()
+ .startObject()
+ .startObject("s")
+ .startObject("_routing")
+ .field("required", true)
+ .field("path", "bs")
+ .endObject()
+ .startObject("properties")
+ .startObject("online")
+ .field("type", "boolean")
+ .endObject()
+ .startObject("ts")
+ .field("type", "date")
+ .field("ignore_malformed", false)
+ .field("format", "dateOptionalTime")
+ .endObject()
+ .startObject("bs")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())
+ .addMapping("bs", "online", "type=boolean", "ts", "type=date,ignore_malformed=false,format=dateOptionalTime"));
+ ensureGreen();
+
+ client().prepareIndex("test", "s", "1").setSource("online", false, "bs", "Y", "ts", System.currentTimeMillis() - 100).get();
+ client().prepareIndex("test", "s", "2").setSource("online", true, "bs", "X", "ts", System.currentTimeMillis() - 10000000).get();
+ client().prepareIndex("test", "bs", "3").setSource("online", false, "ts", System.currentTimeMillis() - 100).get();
+ client().prepareIndex("test", "bs", "4").setSource("online", true, "ts", System.currentTimeMillis() - 123123).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(
+ boolQuery()
+ .must(termQuery("online", true))
+ .must(boolQuery()
+ .should(boolQuery()
+ .must(rangeQuery("ts").lt(System.currentTimeMillis() - (15 * 1000)))
+ .must(termQuery("_type", "bs"))
+ )
+ .should(boolQuery()
+ .must(rangeQuery("ts").lt(System.currentTimeMillis() - (15 * 1000)))
+ .must(termQuery("_type", "s"))
+ )
+ )
+ )
+ .setVersion(true)
+ .setFrom(0).setSize(100).setExplain(true).get();
+ assertNoFailures(response);
+ }
+
+ @Test
+ public void testMultiFieldQueryString() {
+ client().prepareIndex("test", "s", "1").setSource("field1", "value1", "field2", "value2").setRefresh(true).get();
+ logger.info("regular");
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("value1").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("field\\*:value1")).get(), 1);
+ logger.info("prefix");
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("value*").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("field\\*:value*")).get(), 1);
+ logger.info("wildcard");
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("v?lue*").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("field\\*:v?lue*")).get(), 1);
+ logger.info("fuzzy");
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("value~").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("field\\*:value~")).get(), 1);
+ logger.info("regexp");
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("/value[01]/").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryString("field\\*:/value[01]/")).get(), 1);
+ }
+
+ // see #3881 for an extensive description of the issue
+ @Test
+ public void testMatchQueryWithSynonyms() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.index.type", "custom")
+ .put("index.analysis.analyzer.index.tokenizer", "standard")
+ .put("index.analysis.analyzer.index.filter", "lowercase")
+ .put("index.analysis.analyzer.search.type", "custom")
+ .put("index.analysis.analyzer.search.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.search.filter", "lowercase", "synonym")
+ .put("index.analysis.filter.synonym.type", "synonym")
+ .putArray("index.analysis.filter.synonym.synonyms", "fast, quick"));
+ assertAcked(builder.addMapping("test", "text", "type=string,index_analyzer=index,search_analyzer=search"));
+ ensureGreen();
+ client().prepareIndex("test", "test", "1").setSource("text", "quick brown fox").get();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "quick").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "quick brown").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "fast").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+
+ client().prepareIndex("test", "test", "2").setSource("text", "fast brown fox").get();
+ refresh();
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "quick").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "quick brown").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ }
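+
+ // With the synonym filter's default expansion, "fast" and "quick" each map to
+ // the pair {fast, quick} stacked at the same position in the search-time token
+ // stream, so even an AND match query treats the pair as a single required term -
+ // which is why "quick" keeps matching once the "fast brown fox" doc is added.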
+
+ @Test
+ public void testMatchQueryWithStackedStems() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.index.type", "custom")
+ .put("index.analysis.analyzer.index.tokenizer", "standard")
+ .put("index.analysis.analyzer.index.filter", "lowercase")
+ .put("index.analysis.analyzer.search.type", "custom")
+ .put("index.analysis.analyzer.search.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.search.filter", "lowercase", "keyword_repeat", "porterStem", "unique_stem")
+ .put("index.analysis.filter.unique_stem.type", "unique")
+ .put("index.analysis.filter.unique_stem.only_on_same_position", true));
+ assertAcked(builder.addMapping("test", "text", "type=string,index_analyzer=index,search_analyzer=search"));
+ ensureGreen();
+ client().prepareIndex("test", "test", "1").setSource("text", "the fox runs across the street").get();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "fox runs").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+
+ client().prepareIndex("test", "test", "2").setSource("text", "run fox run").get();
+ refresh();
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "fox runs").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ }
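+
+ // The search chain stacks the original and the stemmed token at one position:
+ // keyword_repeat emits each token twice (one copy keyword-marked so porterStem
+ // skips it), porterStem reduces the other copy ("runs" -> "run"), and
+ // unique(only_on_same_position=true) drops the duplicate whenever stemming
+ // changed nothing. Searching "runs" therefore matches both the literal "runs"
+ // in doc 1 and the stem "run" in doc 2.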
+
+ @Test
+ public void testQueryStringWithSynonyms() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.index.type", "custom")
+ .put("index.analysis.analyzer.index.tokenizer", "standard")
+ .put("index.analysis.analyzer.index.filter", "lowercase")
+ .put("index.analysis.analyzer.search.type", "custom")
+ .put("index.analysis.analyzer.search.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.search.filter", "lowercase", "synonym")
+ .put("index.analysis.filter.synonym.type", "synonym")
+ .putArray("index.analysis.filter.synonym.synonyms", "fast, quick"));
+ assertAcked(builder.addMapping("test", "text", "type=string,index_analyzer=index,search_analyzer=search"));
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("text", "quick brown fox").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(queryString("quick").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+ searchResponse = client().prepareSearch("test").setQuery(queryString("quick brown").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+ searchResponse = client().prepareSearch().setQuery(queryString("fast").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+
+ client().prepareIndex("test", "test", "2").setSource("text", "fast brown fox").get();
+ refresh();
+
+ searchResponse = client().prepareSearch("test").setQuery(queryString("quick").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ searchResponse = client().prepareSearch("test").setQuery(queryString("quick brown").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ }
+
+ @Test // see https://github.com/elasticsearch/elasticsearch/issues/3898
+ public void testCustomWordDelimiterQueryString() {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings("analysis.analyzer.my_analyzer.type", "custom",
+ "analysis.analyzer.my_analyzer.tokenizer", "whitespace",
+ "analysis.analyzer.my_analyzer.filter", "custom_word_delimiter",
+ "analysis.filter.custom_word_delimiter.type", "word_delimiter",
+ "analysis.filter.custom_word_delimiter.generate_word_parts", "true",
+ "analysis.filter.custom_word_delimiter.generate_number_parts", "false",
+ "analysis.filter.custom_word_delimiter.catenate_numbers", "true",
+ "analysis.filter.custom_word_delimiter.catenate_words", "false",
+ "analysis.filter.custom_word_delimiter.split_on_case_change", "false",
+ "analysis.filter.custom_word_delimiter.split_on_numerics", "false",
+ "analysis.filter.custom_word_delimiter.stem_english_possessive", "false")
+ .addMapping("type1", "field1", "type=string,analyzer=my_analyzer", "field2", "type=string,analyzer=my_analyzer"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "foo bar baz", "field2", "not needed").get();
+ refresh();
+
+ SearchResponse response = client()
+ .prepareSearch("test")
+ .setQuery(
+ queryString("foo.baz").useDisMax(false).defaultOperator(QueryStringQueryBuilder.Operator.AND)
+ .field("field1").field("field2")).get();
+ assertHitCount(response, 1l);
+ }
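+
+ // The whitespace tokenizer keeps "foo.baz" as a single token, and the
+ // word_delimiter filter (generate_word_parts=true, catenate_words=false)
+ // should then split it into the word parts "foo" and "baz" - which is what
+ // lets the AND query above match the "foo bar baz" value in field1.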
+
+ @Test // see https://github.com/elasticsearch/elasticsearch/issues/3797
+ public void testMultiMatchLenientIssue3797() {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("field1", 123, "field2", "value2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("value2", "field1^2", "field2").lenient(true).useDisMax(false)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("value2", "field1^2", "field2").lenient(true).useDisMax(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("value2", "field2^2").lenient(true)).get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void testIndicesQuery() throws Exception {
+ createIndex("index1", "index2", "index3");
+ ensureGreen();
+
+ client().prepareIndex("index1", "type1").setId("1").setSource("text", "value1").get();
+ client().prepareIndex("index2", "type2").setId("2").setSource("text", "value2").get();
+ client().prepareIndex("index3", "type3").setId("3").setSource("text", "value3").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setQuery(indicesQuery(matchQuery("text", "value1"), "index1")
+ .noMatchQuery(matchQuery("text", "value2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ // default no_match_query is match_all
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setQuery(indicesQuery(matchQuery("text", "value1"), "index1")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setQuery(indicesQuery(matchQuery("text", "value1"), "index1")
+ .noMatchQuery("all")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setQuery(indicesQuery(matchQuery("text", "value1"), "index1")
+ .noMatchQuery("none")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
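+
+ // The indices query selects by index name at query time; roughly (sketch):
+ // { "indices": { "indices": ["index1"],
+ //                "query": { "match": { "text": "value1" } },
+ //                "no_match_query": { "match": { "text": "value2" } } } }
+ // no_match_query also accepts the shorthands "all" (the default) and "none",
+ // and indicesFilter below follows the same shape with no_match_filter.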
+
+ @Test
+ public void testIndicesFilter() throws Exception {
+ createIndex("index1", "index2", "index3");
+ ensureGreen();
+
+ client().prepareIndex("index1", "type1").setId("1").setSource("text", "value1").get();
+ client().prepareIndex("index2", "type2").setId("2").setSource("text", "value2").get();
+ client().prepareIndex("index3", "type3").setId("3").setSource("text", "value3").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setPostFilter(indicesFilter(termFilter("text", "value1"), "index1")
+ .noMatchFilter(termFilter("text", "value2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ //default no match filter is "all"
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setPostFilter(indicesFilter(termFilter("text", "value1"), "index1")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setPostFilter(indicesFilter(termFilter("text", "value1"), "index1")
+ .noMatchFilter("all")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setPostFilter(indicesFilter(termFilter("text", "value1"), "index1")
+ .noMatchFilter("none")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test // https://github.com/elasticsearch/elasticsearch/issues/2416
+ public void testIndicesQuerySkipParsing() throws Exception {
+ createIndex("simple");
+ client().admin().indices().prepareCreate("related")
+ .addMapping("child", jsonBuilder().startObject().startObject("child").startObject("_parent").field("type", "parent")
+ .endObject().endObject().endObject()).get();
+ ensureGreen();
+
+ client().prepareIndex("simple", "lone").setId("1").setSource("text", "value1").get();
+ client().prepareIndex("related", "parent").setId("2").setSource("text", "parent").get();
+ client().prepareIndex("related", "child").setId("3").setParent("2").setSource("text", "value2").get();
+ refresh();
+
+ //has_child fails if executed on "simple" index
+ try {
+ client().prepareSearch("simple")
+ .setQuery(hasChildQuery("child", matchQuery("text", "value"))).get();
+ fail("Should have failed as has_child query can only be executed against parent-child types");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures().length, greaterThan(0));
+ for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
+ // the doubled "for" matches the server-side error message verbatim
+ assertThat(shardSearchFailure.reason(), containsString("No mapping for for type [child]"));
+ }
+ }
+
+ //has_child doesn't get parsed for "simple" index
+ SearchResponse searchResponse = client().prepareSearch("related", "simple")
+ .setQuery(indicesQuery(hasChildQuery("child", matchQuery("text", "value2")), "related")
+ .noMatchQuery(matchQuery("text", "value1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+ }
+
+ @Test // https://github.com/elasticsearch/elasticsearch/issues/2416
+ public void testIndicesFilterSkipParsing() throws Exception {
+ createIndex("simple");
+ client().admin().indices().prepareCreate("related")
+ .addMapping("child", jsonBuilder().startObject().startObject("child").startObject("_parent").field("type", "parent")
+ .endObject().endObject().endObject()).get();
+ ensureGreen();
+
+ client().prepareIndex("simple", "lone").setId("1").setSource("text", "value1").get();
+ client().prepareIndex("related", "parent").setId("2").setSource("text", "parent").get();
+ client().prepareIndex("related", "child").setId("3").setParent("2").setSource("text", "value2").get();
+ refresh();
+
+ //has_child fails if executed on "simple" index
+ try {
+ client().prepareSearch("simple")
+ .setPostFilter(hasChildFilter("child", termFilter("text", "value1"))).get();
+ fail("Should have failed as has_child query can only be executed against parent-child types");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures().length, greaterThan(0));
+ for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
+ // the doubled "for" matches the server-side error message verbatim
+ assertThat(shardSearchFailure.reason(), containsString("No mapping for for type [child]"));
+ }
+ }
+
+ SearchResponse searchResponse = client().prepareSearch("related", "simple")
+ .setPostFilter(indicesFilter(hasChildFilter("child", termFilter("text", "value2")), "related")
+ .noMatchFilter(termFilter("text", "value1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+ }
+
+ @Test
+ public void testIndicesQueryMissingIndices() throws IOException {
+ createIndex("index1");
+ createIndex("index2");
+ ensureGreen();
+
+ client().prepareIndex("index1", "type1", "1").setSource("field", "match").get();
+ client().prepareIndex("index1", "type1", "2").setSource("field", "no_match").get();
+ client().prepareIndex("index2", "type1", "10").setSource("field", "match").get();
+ client().prepareIndex("index2", "type1", "20").setSource("field", "no_match").get();
+ client().prepareIndex("index3", "type1", "100").setSource("field", "match").get();
+ client().prepareIndex("index3", "type1", "200").setSource("field", "no_match").get();
+ refresh();
+
+ //all indices are missing
+ SearchResponse searchResponse = client().prepareSearch().setQuery(
+ indicesQuery(termQuery("field", "missing"), "test1", "test2", "test3")
+ .noMatchQuery(termQuery("field", "match"))).get();
+
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index1".equals(hit.index())) {
+ assertThat(hit, hasId("1"));
+ } else if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index1, index2 or index3");
+ }
+ }
+
+ //only one index specified, which is missing
+ searchResponse = client().prepareSearch().setQuery(
+ indicesQuery(termQuery("field", "missing"), "test1")
+ .noMatchQuery(termQuery("field", "match"))).get();
+
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index1".equals(hit.index())) {
+ assertThat(hit, hasId("1"));
+ } else if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index1, index2 or index3");
+ }
+ }
+
+ //more than one index specified, one of them is missing
+ searchResponse = client().prepareSearch().setQuery(
+ indicesQuery(termQuery("field", "missing"), "index1", "test1")
+ .noMatchQuery(termQuery("field", "match"))).get();
+
+ assertHitCount(searchResponse, 2l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index2 or index3");
+ }
+ }
+ }
+
+ @Test
+ public void testIndicesFilterMissingIndices() throws IOException {
+ createIndex("index1");
+ createIndex("index2");
+ ensureGreen();
+
+ client().prepareIndex("index1", "type1", "1").setSource("field", "match").get();
+ client().prepareIndex("index1", "type1", "2").setSource("field", "no_match").get();
+ client().prepareIndex("index2", "type1", "10").setSource("field", "match").get();
+ client().prepareIndex("index2", "type1", "20").setSource("field", "no_match").get();
+ client().prepareIndex("index3", "type1", "100").setSource("field", "match").get();
+ client().prepareIndex("index3", "type1", "200").setSource("field", "no_match").get();
+ refresh();
+
+ //all indices are missing
+ SearchResponse searchResponse = client().prepareSearch().setQuery(
+ filteredQuery(matchAllQuery(),
+ indicesFilter(termFilter("field", "missing"), "test1", "test2", "test3")
+ .noMatchFilter(termFilter("field", "match")))).get();
+
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index1".equals(hit.index())) {
+ assertThat(hit, hasId("1"));
+ } else if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index1, index2 or index3");
+ }
+ }
+
+ //only one index specified, which is missing
+ searchResponse = client().prepareSearch().setQuery(
+ filteredQuery(matchAllQuery(),
+ indicesFilter(termFilter("field", "missing"), "test1")
+ .noMatchFilter(termFilter("field", "match")))).get();
+
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index1".equals(hit.index())) {
+ assertThat(hit, hasId("1"));
+ } else if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index1, index2 or index3");
+ }
+ }
+
+ //more than one index specified, one of them is missing
+ searchResponse = client().prepareSearch().setQuery(
+ filteredQuery(matchAllQuery(),
+ indicesFilter(termFilter("field", "missing"), "index1", "test1")
+ .noMatchFilter(termFilter("field", "match")))).get();
+
+ assertHitCount(searchResponse, 2l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index2 or index3");
+ }
+ }
+ }
+
+ @Test
+ public void testMinScore() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("score", 1.5).get();
+ client().prepareIndex("test", "test", "2").setSource("score", 1).get();
+ client().prepareIndex("test", "test", "3").setSource("score", 2).get();
+ client().prepareIndex("test", "test", "4").setSource("score", 0.5).get();
+ refresh();
+
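+        // the script score equals each document's "score" field, so min_score 1.5 keeps only docs 3 (2.0) and 1 (1.5)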
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(
+ functionScoreQuery(scriptFunction("_doc['score'].value"))).setMinScore(1.5f).get();
+ assertHitCount(searchResponse, 2);
+ assertFirstHit(searchResponse, hasId("3"));
+ assertSecondHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ public void testQueryStringWithSlopAndFields() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "customer", "1").setSource("desc", "one two three").get();
+ client().prepareIndex("test", "product", "2").setSource("desc", "one two three").get();
+ refresh();
+ {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryString("\"one two\"").defaultField("desc")).get();
+ assertHitCount(searchResponse, 2);
+ }
+ {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryString("\"one two\"").field("product.desc")).get();
+ assertHitCount(searchResponse, 1);
+ }
+ {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryString("\"one three\"~5").field("product.desc")).get();
+ assertHitCount(searchResponse, 1);
+ }
+ {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryString("\"one two\"").defaultField("customer.desc")).get();
+ assertHitCount(searchResponse, 1);
+ }
+ }
+
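+    // Randomly picks one of three equivalent range filter variants (default execution, fielddata execution, or numeric_range) so repeated runs exercise all code paths.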
+ private static FilterBuilder rangeFilter(String field, Object from, Object to) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ return FilterBuilders.rangeFilter(field).from(from).to(to);
+ } else {
+ return FilterBuilders.rangeFilter(field).from(from).to(to).setExecution("fielddata");
+ }
+ } else {
+ return FilterBuilders.numericRangeFilter(field).from(from).to(to);
+ }
+ }
+
+ @Test
+ public void testSimpleQueryString() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("body", "foo").get();
+ client().prepareIndex("test", "type1", "2").setSource("body", "bar").get();
+ client().prepareIndex("test", "type1", "3").setSource("body", "foo bar").get();
+ client().prepareIndex("test", "type1", "4").setSource("body", "quux baz eggplant").get();
+ client().prepareIndex("test", "type1", "5").setSource("body", "quux baz spaghetti").get();
+ client().prepareIndex("test", "type1", "6").setSource("otherbody", "spaghetti").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryString("foo bar")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo bar").defaultOperator(SimpleQueryStringBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(simpleQueryString("\"quux baz\" +(eggplant | spaghetti)")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "4", "5");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("eggplants").analyzer("snowball")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("4"));
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("spaghetti").field("body", 10.0f).field("otherbody", 2.0f)).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("5"));
+ assertSearchHits(searchResponse, "5", "6");
+ }
+
+ @Test
+ public void testNestedFieldSimpleQueryString() throws IOException {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1)
+ .addMapping("type1", jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("body").field("type", "string")
+ .startObject("fields")
+ .startObject("sub").field("type", "string")
+ .endObject() // sub
+ .endObject() // fields
+ .endObject() // body
+ .endObject() // properties
+ .endObject() // type1
+ .endObject()));
+ client().prepareIndex("test", "type1", "1").setSource("body", "foo bar baz").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo bar baz").field("body")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo bar baz").field("type1.body")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo bar baz").field("body.sub")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo bar baz").field("type1.body.sub")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+ }
+
+ @Test
+ public void testSimpleQueryStringFlags() {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ client().prepareIndex("test", "type1", "1").setSource("body", "foo").get();
+ client().prepareIndex("test", "type1", "2").setSource("body", "bar").get();
+ client().prepareIndex("test", "type1", "3").setSource("body", "foo bar").get();
+ client().prepareIndex("test", "type1", "4").setSource("body", "quux baz eggplant").get();
+ client().prepareIndex("test", "type1", "5").setSource("body", "quux baz spaghetti").get();
+ client().prepareIndex("test", "type1", "6").setSource("otherbody", "spaghetti").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo bar").flags(SimpleQueryStringFlag.ALL)).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo | bar")
+ .defaultOperator(SimpleQueryStringBuilder.Operator.AND)
+ .flags(SimpleQueryStringFlag.OR)).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("foo | bar")
+ .defaultOperator(SimpleQueryStringBuilder.Operator.AND)
+ .flags(SimpleQueryStringFlag.NONE)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("3"));
+
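+        // with flags NONE, "|" and "*" are literal text rather than operators, so ANDing all terms matches nothing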
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("baz | egg*")
+ .defaultOperator(SimpleQueryStringBuilder.Operator.AND)
+ .flags(SimpleQueryStringFlag.NONE)).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch().setSource("{\n" +
+ " \"query\": {\n" +
+ " \"simple_query_string\": {\n" +
+ " \"query\": \"foo|bar\",\n" +
+ " \"default_operator\": \"AND\"," +
+ " \"flags\": \"NONE\"\n" +
+ " }\n" +
+ " }\n" +
+ "}").get();
+ assertHitCount(searchResponse, 1l);
+
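+        // with only WHITESPACE and PREFIX enabled, "egg*" prefix-expands (matching "eggplant") while "|" stays literal and is discarded during analysis, so doc 4 matches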
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryString("baz | egg*")
+ .defaultOperator(SimpleQueryStringBuilder.Operator.AND)
+ .flags(SimpleQueryStringFlag.WHITESPACE, SimpleQueryStringFlag.PREFIX)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("4"));
+ }
+
+ @Test
+ public void testRangeFilterNoCacheWithNow() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)
+ .addMapping("type1", "date", "type=date,format=YYYY-mm-dd"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("date", "2014-01-01", "field", "value")
+ .setRefresh(true)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.rangeFilter("date").from("2013-01-01").to("now")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+        // the filter cache should not contain anything, because `now` is used in `to`.
+ IndicesStatsResponse statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.matchAllFilter())
+ .must(FilterBuilders.rangeFilter("date").from("2013-01-01").to("now"))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+        // the filter cache should still not contain anything, because `now` is used in `to`.
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.matchAllFilter())
+ .must(FilterBuilders.rangeFilter("date").from("2013-01-01").to("now/d").cache(true))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+        // `now/d` rounds to the start of the day, so the filter is cacheable and the cache must now contain something
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ long filtercacheSize = statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes();
+ assertThat(filtercacheSize, greaterThan(0l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.termFilter("field", "value").cache(true))
+ .must(FilterBuilders.rangeFilter("date").from("2013-01-01").to("now"))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+        // the term filter is explicitly cached as well, so the filter cache should now contain more than before
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(filtercacheSize));
+ filtercacheSize = statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(
+ matchAllQuery(),
+ FilterBuilders.boolFilter().cache(true)
+ .must(FilterBuilders.matchAllFilter())
+ .must(FilterBuilders.rangeFilter("date").from("2013-01-01").to("now").cache(true))
+ ))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+        // The range filter is now explicitly cached, so it is in the filter cache.
+ statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
+ assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(filtercacheSize));
+ }
+
+ @Test
+ public void testSearchEmptyDoc() {
+ prepareCreate("test").setSettings("{\"index.analysis.analyzer.default.type\":\"keyword\"}").get();
+ client().prepareIndex("test", "type1", "1").setSource("{}").get();
+ refresh();
+ assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/rescore/QueryRescorerTests.java b/src/test/java/org/elasticsearch/search/rescore/QueryRescorerTests.java
new file mode 100644
index 0000000..0cf491f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/rescore/QueryRescorerTests.java
@@ -0,0 +1,464 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.rescore;
+
+import org.apache.lucene.util.English;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.rescore.RescoreBuilder.QueryRescorer;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class QueryRescorerTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testRescorePhrase() throws Exception {
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
+ .field("analyzer", "whitespace").field("type", "string").endObject().endObject().endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2)).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree").execute()
+ .actionGet();
+ client().prepareIndex("test", "type1", "3")
+ .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree").execute()
+ .actionGet();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "quick brown").slop(2).boost(4.0f)))
+ .setRescoreWindow(5).execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("2"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "the quick brown").slop(3)))
+ .setRescoreWindow(5).execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(RescoreBuilder.queryRescorer((QueryBuilders.matchPhraseQuery("field1", "the quick brown"))))
+ .setRescoreWindow(5).execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+ }
+
+ @Test
+ public void testMoreDocs() throws Exception {
+ Builder builder = ImmutableSettings.builder();
+ builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace");
+ builder.putArray("index.analysis.analyzer.synonym.filter", "synonym", "lowercase");
+ builder.put("index.analysis.filter.synonym.type", "synonym");
+ builder.putArray("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("index_analyzer", "whitespace").field("search_analyzer", "synonym")
+ .endObject().endObject().endObject().endObject();
+
+ client().admin().indices().prepareCreate("test").addMapping("type1", mapping).setSettings(builder.put("index.number_of_shards", 1))
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "massachusetts avenue boston massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "lexington avenue boston massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "3").setSource("field1", "boston avenue lexington massachusetts").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ client().prepareIndex("test", "type1", "4").setSource("field1", "boston road lexington massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "5").setSource("field1", "lexington street lexington massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "6").setSource("field1", "massachusetts avenue lexington massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "7").setSource("field1", "bosten street san franciso california").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ client().prepareIndex("test", "type1", "8").setSource("field1", "hollywood boulevard los angeles california").execute().actionGet();
+ client().prepareIndex("test", "type1", "9").setSource("field1", "1st street boston massachussetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "10").setSource("field1", "1st street boston massachusetts").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ client().prepareIndex("test", "type1", "11").setSource("field1", "2st street boston massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "12").setSource("field1", "3st street boston massachusetts").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ SearchResponse searchResponse = client()
+ .prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(5)
+ .setRescorer(
+ RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3))
+ .setQueryWeight(0.6f).setRescoreQueryWeight(2.0f)).setRescoreWindow(20).execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ assertHitCount(searchResponse, 9);
+ assertFirstHit(searchResponse, hasId("2"));
+ assertSecondHit(searchResponse, hasId("6"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client()
+ .prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(5)
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setRescorer(
+ RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3))
+ .setQueryWeight(0.6f).setRescoreQueryWeight(2.0f)).setRescoreWindow(20).execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ assertHitCount(searchResponse, 9);
+ assertFirstHit(searchResponse, hasId("2"));
+ assertSecondHit(searchResponse, hasId("6"));
+ assertThirdHit(searchResponse, hasId("3"));
+ }
+
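+    // asserts that the rescored response matches the plain one: identical scores, and identical ids up to score ties at the tail of the queue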
+    private static void assertEquivalent(String query, SearchResponse plain, SearchResponse rescored) {
+ assertNoFailures(plain);
+ assertNoFailures(rescored);
+ SearchHits leftHits = plain.getHits();
+ SearchHits rightHits = rescored.getHits();
+ assertThat(leftHits.getTotalHits(), equalTo(rightHits.getTotalHits()));
+ assertThat(leftHits.getHits().length, equalTo(rightHits.getHits().length));
+ SearchHit[] hits = leftHits.getHits();
+ SearchHit[] rHits = rightHits.getHits();
+ for (int i = 0; i < hits.length; i++) {
+ assertThat("query: " + query, hits[i].getScore(), equalTo(rHits[i].getScore()));
+ }
+ for (int i = 0; i < hits.length; i++) {
+ if (hits[i].getScore() == hits[hits.length-1].getScore()) {
+ return; // we need to cut off here since this is the tail of the queue and we might not have fetched enough docs
+ }
+ assertThat("query: " + query,hits[i].getId(), equalTo(rHits[i].getId()));
+ }
+ }
+
+    private static void assertEquivalentOrSubstringMatch(String query, SearchResponse plain, SearchResponse rescored) {
+ SearchHits leftHits = plain.getHits();
+ SearchHits rightHits = rescored.getHits();
+ assertThat(leftHits.getTotalHits(), equalTo(rightHits.getTotalHits()));
+ assertThat(leftHits.getHits().length, equalTo(rightHits.getHits().length));
+ SearchHit[] hits = leftHits.getHits();
+ SearchHit[] otherHits = rightHits.getHits();
+ if (!hits[0].getId().equals(otherHits[0].getId())) {
+ assertThat(((String) otherHits[0].sourceAsMap().get("field1")).contains(query), equalTo(true));
+ } else {
+ for (int i = 0; i < hits.length; i++) {
+ if (hits[i].getScore() == hits[hits.length-1].getScore()) {
+ return; // we need to cut off here since this is the tail of the queue and we might not have fetched enough docs
+ }
+ assertThat(query, hits[i].getId(), equalTo(rightHits.getHits()[i].getId()));
+ }
+ }
+ }
+
+ @Test
+ // forces QUERY_THEN_FETCH because of https://github.com/elasticsearch/elasticsearch/issues/4829
+ public void testEquivalence() throws Exception {
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
+ .field("analyzer", "whitespace").field("type", "string").endObject().endObject().endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", between(1, 5)).put("index.number_of_replicas", between(0, 1))).execute().actionGet();
+ ensureGreen();
+
+ int numDocs = atLeast(100);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+ }
+
+ indexRandom(true, docs);
+ ensureGreen();
+ final int iters = atLeast(50);
+ for (int i = 0; i < iters; i++) {
+ int resultSize = between(5, 30);
+ int rescoreWindow = between(1, 3) * resultSize;
+ String intToEnglish = English.intToEnglish(between(0, numDocs-1));
+ String query = intToEnglish.split(" ")[0];
+ SearchResponse rescored = client()
+ .prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.matchQuery("field1", query).operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(resultSize)
+ .setRescorer(
+ RescoreBuilder
+ .queryRescorer(
+ QueryBuilders
+ .constantScoreQuery(QueryBuilders.matchPhraseQuery("field1", intToEnglish).slop(3)))
+ .setQueryWeight(1.0f)
+ .setRescoreQueryWeight(0.0f)) // no weight - so we basically use the same score as the actual query
+ .setRescoreWindow(rescoreWindow).execute().actionGet();
+
+ SearchResponse plain = client().prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.matchQuery("field1", query).operator(MatchQueryBuilder.Operator.OR)).setFrom(0).setSize(resultSize)
+ .execute().actionGet();
+
+ // check equivalence
+ assertEquivalent(query, plain, rescored);
+
+ rescored = client()
+ .prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.matchQuery("field1", query).operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(resultSize)
+ .setRescorer(
+ RescoreBuilder
+ .queryRescorer(
+ QueryBuilders
+ .constantScoreQuery(QueryBuilders.matchPhraseQuery("field1", "not in the index").slop(3)))
+ .setQueryWeight(1.0f)
+ .setRescoreQueryWeight(1.0f))
+ .setRescoreWindow(rescoreWindow).execute().actionGet();
+ // check equivalence
+ assertEquivalent(query, plain, rescored);
+
+ rescored = client()
+ .prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.matchQuery("field1", query).operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(resultSize)
+ .setRescorer(
+ RescoreBuilder
+ .queryRescorer(
+ QueryBuilders.matchPhraseQuery("field1", intToEnglish).slop(0))
+ .setQueryWeight(1.0f).setRescoreQueryWeight(1.0f)).setRescoreWindow(2 * rescoreWindow).execute().actionGet();
+ // check equivalence or if the first match differs we check if the phrase is a substring of the top doc
+ assertEquivalentOrSubstringMatch(intToEnglish, plain, rescored);
+ }
+ }
+
+ @Test
+ public void testExplain() throws Exception {
+ prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
+ .field("analyzer", "whitespace").field("type", "string").endObject().endObject().endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2)).execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree").execute()
+ .actionGet();
+ client().prepareIndex("test", "type1", "3")
+ .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree").execute()
+ .actionGet();
+ refresh();
+
+ {
+ SearchResponse searchResponse = client()
+ .prepareSearch()
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(
+ RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "the quick brown").slop(2).boost(4.0f))
+ .setQueryWeight(0.5f).setRescoreQueryWeight(0.4f)).setRescoreWindow(5).setExplain(true).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 3);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ for (int i = 0; i < 3; i++) {
+ assertThat(searchResponse.getHits().getAt(i).explanation(), notNullValue());
+ assertThat(searchResponse.getHits().getAt(i).explanation().isMatch(), equalTo(true));
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails().length, equalTo(2));
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails()[0].isMatch(), equalTo(true));
+ if (i == 2) {
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails()[1].getValue(), equalTo(0.5f));
+ } else {
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDescription(), equalTo("sum of:"));
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails()[0].getDetails()[1].getValue(), equalTo(0.5f));
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails()[1].getDetails()[1].getValue(), equalTo(0.4f));
+ }
+ }
+ }
+
+ String[] scoreModes = new String[]{ "max", "min", "avg", "total", "multiply", "" };
+ String[] descriptionModes = new String[]{ "max of:", "min of:", "avg of:", "sum of:", "product of:", "sum of:" };
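+        // the empty string exercises the default score mode, which sums scores like "total" (hence "sum of:")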
+ for (int i = 0; i < scoreModes.length; i++) {
+ QueryRescorer rescoreQuery = RescoreBuilder.queryRescorer(QueryBuilders.matchQuery("field1", "the quick brown").boost(4.0f))
+ .setQueryWeight(0.5f).setRescoreQueryWeight(0.4f);
+
+ if (!"".equals(scoreModes[i])) {
+ rescoreQuery.setScoreMode(scoreModes[i]);
+ }
+
+ SearchResponse searchResponse = client()
+ .prepareSearch()
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(rescoreQuery).setRescoreWindow(5).setExplain(true).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 3);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ for (int j = 0; j < 3; j++) {
+ assertThat(searchResponse.getHits().getAt(j).explanation().getDescription(), equalTo(descriptionModes[i]));
+ }
+ }
+ }
+
+ @Test
+ public void testScoring() throws Exception {
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
+ .field("index", "not_analyzed").field("type", "string").endObject().endObject().endObject().endObject())
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", between(1,5)).put("index.number_of_replicas", between(0,1))).get();
+ int numDocs = atLeast(100);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+ }
+
+ indexRandom(true, docs);
+ ensureGreen();
+
+ String[] scoreModes = new String[]{ "max", "min", "avg", "total", "multiply", "" };
+ float primaryWeight = 1.1f;
+ float secondaryWeight = 1.6f;
+
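+        // the base query scores docs i..i+3 as 2.0/3.0/5.0/0.2 and the rescore query rescores docs i, i+1 and i+3 as 5.0/7.0/0.0; each score mode combines the weighted results differently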
+ for (String scoreMode: scoreModes) {
+ for (int i = 0; i < numDocs - 4; i++) {
+ String[] intToEnglish = new String[] { English.intToEnglish(i), English.intToEnglish(i + 1), English.intToEnglish(i + 2), English.intToEnglish(i + 3) };
+
+ QueryRescorer rescoreQuery = RescoreBuilder
+ .queryRescorer(
+ QueryBuilders.boolQuery()
+ .disableCoord(true)
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[0])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("5.0f")))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[1])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("7.0f")))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[3])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("0.0f"))))
+ .setQueryWeight(primaryWeight)
+ .setRescoreQueryWeight(secondaryWeight);
+
+ if (!"".equals(scoreMode)) {
+ rescoreQuery.setScoreMode(scoreMode);
+ }
+
+ SearchResponse rescored = client()
+ .prepareSearch()
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.boolQuery()
+ .disableCoord(true)
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[0])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("2.0f")))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[1])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("3.0f")))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[2])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("5.0f")))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[3])).boostMode(CombineFunction.REPLACE).add(ScoreFunctionBuilders.scriptFunction("0.2f"))))
+ .setFrom(0)
+ .setSize(10)
+ .setRescorer(rescoreQuery)
+ .setRescoreWindow(50).execute().actionGet();
+
+ assertHitCount(rescored, 4);
+
+ if ("total".equals(scoreMode) || "".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
+ assertSecondHit(rescored, hasId(String.valueOf(i)));
+ assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight + 7.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight + 5.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight + 0.0f * secondaryWeight));
+ } else if ("max".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
+ assertSecondHit(rescored, hasId(String.valueOf(i)));
+ assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(7.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight));
+ } else if ("min".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 2)));
+ assertSecondHit(rescored, hasId(String.valueOf(i + 1)));
+ assertThirdHit(rescored, hasId(String.valueOf(i)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(3.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(2.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.0f * secondaryWeight));
+ } else if ("avg".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
+ assertSecondHit(rescored, hasId(String.valueOf(i + 2)));
+ assertThirdHit(rescored, hasId(String.valueOf(i)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo((3.0f * primaryWeight + 7.0f * secondaryWeight) / 2.0f));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo((2.0f * primaryWeight + 5.0f * secondaryWeight) / 2.0f));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo((0.2f * primaryWeight) / 2.0f));
+ } else if ("multiply".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
+ assertSecondHit(rescored, hasId(String.valueOf(i)));
+ assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight * 7.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight * 5.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight * 0.0f * secondaryWeight));
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/scan/SearchScanScrollingTests.java b/src/test/java/org/elasticsearch/search/scan/SearchScanScrollingTests.java
new file mode 100644
index 0000000..9d7fb0a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/scan/SearchScanScrollingTests.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scan;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.Set;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+public class SearchScanScrollingTests extends ElasticsearchIntegrationTest {
+
+ public void testRandomized() throws Exception {
+ testScroll(between(1, 4), atLeast(100), between(1, 300), getRandom().nextBoolean(), getRandom().nextBoolean());
+ }
+
+ private void testScroll(int numberOfShards, long numberOfDocs, int size, boolean unbalanced, boolean trackScores) throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", numberOfShards)).get();
+ ensureGreen();
+
+ Set<String> ids = Sets.newHashSet();
+ Set<String> expectedIds = Sets.newHashSet();
+ for (int i = 0; i < numberOfDocs; i++) {
+ String id = Integer.toString(i);
+ expectedIds.add(id);
+ String routing = null;
+ if (unbalanced) {
+ if (i < (numberOfDocs * 0.6)) {
+ routing = "0";
+ } else if (i < (numberOfDocs * 0.9)) {
+ routing = "1";
+ } else {
+ routing = "2";
+ }
+ }
+ client().prepareIndex("test", "type1", id).setRouting(routing).setSource("field", i).execute().actionGet();
+ // make some segments
+ if (i % 10 == 0) {
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ }
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.SCAN)
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setTrackScores(trackScores)
+ .execute().actionGet();
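+        // with search type SCAN the initial response carries only the scroll id; hits arrive on the scroll requests below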
+ try {
+ assertHitCount(searchResponse, numberOfDocs);
+
+            // start scrolling until we get no results
+ while (true) {
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).execute().actionGet();
+ assertHitCount(searchResponse, numberOfDocs);
+
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id() + "should not exist in the result set", ids.contains(hit.id()), equalTo(false));
+ ids.add(hit.id());
+ if (trackScores) {
+ assertThat(hit.getScore(), greaterThan(0.0f));
+ } else {
+ assertThat(hit.getScore(), equalTo(0.0f));
+ }
+ }
+ if (searchResponse.getHits().hits().length == 0) {
+ break;
+ }
+ }
+
+ assertThat(expectedIds, equalTo(ids));
+ } finally {
+ clearScroll(searchResponse.getScrollId());
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/scan/SearchScanTests.java b/src/test/java/org/elasticsearch/search/scan/SearchScanTests.java
new file mode 100644
index 0000000..f3bb576
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/scan/SearchScanTests.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scan;
+
+import com.google.common.collect.Sets;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Set;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.hamcrest.Matchers.equalTo;
+
+public class SearchScanTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ @Slow
+ public void testNarrowingQuery() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", between(1,5))).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ Set<String> ids = Sets.newHashSet();
+ Set<String> expectedIds = Sets.newHashSet();
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[atLeast(50)];
+ for (int i = 0; i < builders.length/2; i++) {
+ expectedIds.add(Integer.toString(i));
+ builders[i] = client().prepareIndex("test", "tweet", Integer.toString(i)).setSource(
+ jsonBuilder().startObject().field("user", "kimchy1").field("postDate", System.currentTimeMillis()).field("message", "test").endObject());
+ }
+
+ for (int i = builders.length/2; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "tweet", Integer.toString(i)).setSource(
+ jsonBuilder().startObject().field("user", "kimchy2").field("postDate", System.currentTimeMillis()).field("message", "test").endObject());
+ }
+ indexRandom(true, builders);
+
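+        // scan with a narrowing term query: only the "kimchy1" half of the corpus should come back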
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.SCAN)
+ .setQuery(termQuery("user", "kimchy1"))
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long)builders.length/2));
+
+        // start scrolling until we get no results
+ while (true) {
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long)builders.length/2));
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id() + "should not exists in the result set", ids.contains(hit.id()), equalTo(false));
+ ids.add(hit.id());
+ }
+ if (searchResponse.getHits().hits().length == 0) {
+ break;
+ }
+ }
+
+ assertThat(expectedIds, equalTo(ids));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java
new file mode 100644
index 0000000..4167eaa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scriptfilter;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.client.Requests.refreshRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.FilterBuilders.scriptFilter;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class ScriptFilterSearchTests extends ElasticsearchIntegrationTest {
+ private final static Settings DEFAULT_SETTINGS = ImmutableSettings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ @Test
+ public void testCustomScriptBoost() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(DEFAULT_SETTINGS).execute().actionGet();
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 2.0f).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "3")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).endObject())
+ .execute().actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("running doc['num1'].value > 1");
+ SearchResponse response = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), scriptFilter("doc['num1'].value > 1")))
+ .addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", "doc['num1'].value")
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat(response.getHits().getAt(1).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(3.0));
+
+ logger.info("running doc['num1'].value > param1");
+ response = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), scriptFilter("doc['num1'].value > param1").addParam("param1", 2)))
+ .addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", "doc['num1'].value")
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(3.0));
+
+ logger.info("running doc['num1'].value > param1");
+ response = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), scriptFilter("doc['num1'].value > param1").addParam("param1", -1)))
+ .addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", "doc['num1'].value")
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(1.0));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat(response.getHits().getAt(2).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(3.0));
+ }
+
+ private static AtomicInteger scriptCounter = new AtomicInteger(0);
+
+ public static int incrementScriptCounter() {
+ return scriptCounter.incrementAndGet();
+ }
+
+ @Test
+ public void testCustomScriptCache() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(DEFAULT_SETTINGS).execute().actionGet();
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("test", "1").field("num", 1.0f).endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("test", "2").field("num", 2.0f).endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("test", "3").field("num", 3.0f).endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ String script = "org.elasticsearch.search.scriptfilter.ScriptFilterSearchTests.incrementScriptCounter() > 0";
+
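+        // each flush above sealed a segment; a cached filter executes once per segment (hence a counter of 3), and later cache hits skip the script entirely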
+ scriptCounter.set(0);
+ logger.info("running script filter the first time");
+ SearchResponse response = client().prepareSearch()
+ .setQuery(filteredQuery(termQuery("test", "1"), scriptFilter(script).cache(true)))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(scriptCounter.get(), equalTo(3));
+
+ scriptCounter.set(0);
+ logger.info("running script filter the second time");
+ response = client().prepareSearch()
+ .setQuery(filteredQuery(termQuery("test", "2"), scriptFilter(script).cache(true)))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(scriptCounter.get(), equalTo(0));
+
+ scriptCounter.set(0);
+ logger.info("running script filter with new parameters");
+ response = client().prepareSearch()
+ .setQuery(filteredQuery(termQuery("test", "1"), scriptFilter(script).addParam("param1", "1").cache(true)))
+ .execute().actionGet();
+
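+        // adding a parameter changes the cache key, so the filter bitset is rebuilt
+        // and the script runs once per document again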
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(scriptCounter.get(), equalTo(3));
+
+ scriptCounter.set(0);
+ logger.info("running script filter with same parameters");
+ response = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), scriptFilter(script).addParam("param1", "1").cache(true)))
+ .execute().actionGet();
+
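+        // same script and parameters as the previous request: a cache hit, even though
+        // the wrapped query is different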
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(scriptCounter.get(), equalTo(0));
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java b/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java
new file mode 100644
index 0000000..33a3f91
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java
@@ -0,0 +1,453 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scroll;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SearchScrollTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleScrollQueryThenFetch() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 3)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
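+        // page size 35 over 100 docs: the scroll should deliver pages of 35, 35 and finally 30 hits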
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+ try {
+ long counter = 0;
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(30));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+ } finally {
+ clearScroll(searchResponse.getScrollId());
+ }
+ }
+
+ @Test
+ public void testSimpleScrollQueryThenFetchSmallSizeUnevenDistribution() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 3)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
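+        // spread docs unevenly across shards: ids 0-60 get routing "0", 61-90 routing "2",
+        // 91-99 routing "1"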
+ for (int i = 0; i < 100; i++) {
+ String routing = "0";
+ if (i > 90) {
+ routing = "1";
+ } else if (i > 60) {
+ routing = "2";
+ }
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", i).setRouting(routing).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setQuery(matchAllQuery())
+ .setSize(3)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+ try {
+ long counter = 0;
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
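+            // 3 hits per page: the initial page plus these 32 scroll pages cover 99 of the 100 docs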
+ for (int i = 0; i < 32; i++) {
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+ }
+
+            // and now the last non-empty page: a single hit (3 + 32 * 3 + 1 = 100)
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+            // and the page after that is empty, marking the end of the scroll
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(0));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+ } finally {
+ clearScroll(searchResponse.getScrollId());
+ }
+ }
+
+ @Test
+ public void testScrollAndUpdateIndex() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 5)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 500; i++) {
+ client().prepareIndex("test", "tweet", Integer.toString(i)).setSource(
+ jsonBuilder().startObject().field("user", "kimchy").field("postDate", System.currentTimeMillis()).field("message", "test").endObject()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "test")).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "test")).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "update")).execute().actionGet().getCount(), equalTo(0l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "update")).execute().actionGet().getCount(), equalTo(0l));
+
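+        // scroll through every tweet, reindexing each one with its message changed
+        // from "test" to "update"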
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(queryString("user:kimchy"))
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .addSort("postDate", SortOrder.ASC)
+ .execute().actionGet();
+ try {
+ do {
+ for (SearchHit searchHit : searchResponse.getHits().hits()) {
+ Map<String, Object> map = searchHit.sourceAsMap();
+ map.put("message", "update");
+ client().prepareIndex("test", "tweet", searchHit.id()).setSource(map).execute().actionGet();
+ }
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).execute().actionGet();
+ } while (searchResponse.getHits().hits().length > 0);
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "test")).execute().actionGet().getCount(), equalTo(0l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "test")).execute().actionGet().getCount(), equalTo(0l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "update")).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "update")).execute().actionGet().getCount(), equalTo(500l));
+ } finally {
+ clearScroll(searchResponse.getScrollId());
+ }
+ }
+
+ @Test
+ public void testSimpleScrollQueryThenFetch_clearScrollIds() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 3)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse1 = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ SearchResponse searchResponse2 = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ long counter1 = 0;
+ long counter2 = 0;
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse1.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
+ }
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse2.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
+ }
+
+ searchResponse1 = client().prepareSearchScroll(searchResponse1.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ searchResponse2 = client().prepareSearchScroll(searchResponse2.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse1.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
+ }
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse2.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
+ }
+
+ ClearScrollResponse clearResponse = client().prepareClearScroll()
+ .addScrollId(searchResponse1.getScrollId())
+ .addScrollId(searchResponse2.getScrollId())
+ .execute().actionGet();
+ assertThat(clearResponse.isSucceeded(), equalTo(true));
+
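+        // both scroll ids are now invalidated, so the follow-up scroll requests must come back empty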
+ searchResponse1 = client().prepareSearchScroll(searchResponse1.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ searchResponse2 = client().prepareSearchScroll(searchResponse2.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(0l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(0));
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(0l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(0));
+ }
+
+ @Test
+ public void testClearNonExistentScrollId() throws Exception {
+ createIndex("idx");
+ ClearScrollResponse response = client().prepareClearScroll()
+ .addScrollId("cXVlcnlUaGVuRmV0Y2g7MzsyOlpBRC1qOUhrUjhhZ0NtQWUxU2FuWlE7MjpRcjRaNEJ2R1JZV1VEMW02ZGF1LW5ROzI6S0xUal9lZDRTd3lWNUhUU2VSb01CQTswOw==")
+ .get();
+        // Whether a scroll was actually cleared cannot be verified here: the free-search-context
+        // response returned by each node does not carry that information.
+ assertThat(response.isSucceeded(), is(true));
+ }
+
+ @Test
+ public void testClearIllegalScrollId() throws Exception {
+ createIndex("idx");
+ try {
+ client().prepareClearScroll().addScrollId("c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1").get();
+ fail();
+        } catch (ElasticsearchIllegalArgumentException e) {
+            // expected
+        }
+ try {
+ // Fails during base64 decoding (Base64-encoded string must have at least four characters)
+ client().prepareClearScroll().addScrollId("a").get();
+ fail();
+        } catch (ElasticsearchIllegalArgumentException e) {
+            // expected
+        }
+ try {
+ client().prepareClearScroll().addScrollId("abcabc").get();
+ fail();
+ // if running without -ea this will also throw ElasticsearchIllegalArgumentException
+ } catch (UncategorizedExecutionException e) {
+ assertThat(e.getRootCause(), instanceOf(AssertionError.class));
+ }
+ }
+
+ @Test
+ public void testSimpleScrollQueryThenFetch_clearAllScrollIds() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 3)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse1 = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ SearchResponse searchResponse2 = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ long counter1 = 0;
+ long counter2 = 0;
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse1.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
+ }
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse2.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
+ }
+
+ searchResponse1 = client().prepareSearchScroll(searchResponse1.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ searchResponse2 = client().prepareSearchScroll(searchResponse2.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse1.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
+ }
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse2.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
+ }
+
+ ClearScrollResponse clearResponse = client().prepareClearScroll().addScrollId("_all")
+ .execute().actionGet();
+ assertThat(clearResponse.isSucceeded(), equalTo(true));
+
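+        // "_all" releases every active scroll context on the cluster, so both scrolls now return nothing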
+ searchResponse1 = client().prepareSearchScroll(searchResponse1.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ searchResponse2 = client().prepareSearchScroll(searchResponse2.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(0l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(0));
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(0l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(0));
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/4156
+ public void testDeepPaginationWithOneDocIndexAndDoNotBlowUp() throws Exception {
+ client().prepareIndex("index", "type", "1")
+ .setSource("field", "value")
+ .setRefresh(true)
+ .execute().get();
+
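+        // a single-doc index searched with size Integer.MAX_VALUE must not blow up,
+        // whatever the search type (see the issue linked above)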
+ for (SearchType searchType : SearchType.values()) {
+ SearchRequestBuilder builder = client().prepareSearch("index")
+ .setSearchType(searchType)
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setSize(Integer.MAX_VALUE);
+
+            // SCAN always requires a scroll, COUNT never accepts one; otherwise set it randomly
+            if (searchType == SearchType.SCAN || (searchType != SearchType.COUNT && randomBoolean())) {
+ builder.setScroll("1m");
+ }
+
+ SearchResponse response = builder.execute().actionGet();
+ try {
+ ElasticsearchAssertions.assertHitCount(response, 1l);
+ } finally {
+ String scrollId = response.getScrollId();
+ if (scrollId != null) {
+ clearScroll(scrollId);
+ }
+ }
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java
new file mode 100644
index 0000000..450692b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.simple;
+
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+public class SimpleSearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSearchNullIndex() {
+ try {
+ client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet();
+ fail();
+        } catch (ElasticsearchIllegalArgumentException e) {
+            // expected: a null index name is rejected
+        }
+
+ try {
+ client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet();
+ fail();
+        } catch (ElasticsearchIllegalArgumentException e) {
+            // expected: a null index array is rejected
+        }
+ }
+
+ @Test
+ public void testSearchRandomPreference() throws InterruptedException, ExecutionException {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", between(1, 3))).get();
+ indexRandom(true, client().prepareIndex("test", "type", "1").setSource("field", "value"),
+ client().prepareIndex("test", "type", "2").setSource("field", "value"),
+ client().prepareIndex("test", "type", "3").setSource("field", "value"),
+ client().prepareIndex("test", "type", "4").setSource("field", "value"),
+ client().prepareIndex("test", "type", "5").setSource("field", "value"),
+ client().prepareIndex("test", "type", "6").setSource("field", "value"));
+
+ int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+            // any random preference value, including arbitrary unicode strings, must be accepted
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).setPreference(randomUnicodeOfLengthBetween(0, 4)).get();
+ assertHitCount(searchResponse, 6l);
+
+ }
+ }
+
+ @Test
+ public void simpleIpTests() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("from").field("type", "ip").endObject()
+ .startObject("to").field("type", "ip").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefresh(true).execute().actionGet();
+
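+        // 192.168.0.7 lies inside the indexed range [192.168.0.5, 192.168.0.10], so exactly one hit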
+ SearchResponse search = client().prepareSearch()
+ .setQuery(boolQuery().must(rangeQuery("from").lt("192.168.0.7")).must(rangeQuery("to").gt("192.168.0.7")))
+ .execute().actionGet();
+
+ assertHitCount(search, 1l);
+ }
+
+ @Test
+ public void simpleIdTests() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+
+ client().prepareIndex("test", "type", "XXX1").setSource("field", "value").setRefresh(true).execute().actionGet();
+        // _id is not indexed, but a term query on it is rewritten internally and still matches
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryString("_id:XXX1")).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+
+        // _id is not indexed, but prefix queries on it are supported as well
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.prefixQuery("_id", "XXX")).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryString("_id:XXX*").lowercaseExpandedTerms(false)).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void simpleDateRangeWithUpperInclusiveEnabledTests() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()).execute().actionGet();
+ client().prepareIndex("test", "type1", "1").setSource("field", "2010-01-05T02:00").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "2010-01-06T02:00").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+        // by default the upper bound is rounded up, so lte("2010-01-06") covers that entire day
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05").lte("2010-01-06")).execute().actionGet();
+ assertHitCount(searchResponse, 2l);
+ searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05").lt("2010-01-06")).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void simpleDateRangeWithUpperInclusiveDisabledTests() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.mapping.date.round_ceil", false)).execute().actionGet();
+ client().prepareIndex("test", "type1", "1").setSource("field", "2010-01-05T02:00").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "2010-01-06T02:00").execute().actionGet();
+ ensureGreen();
+ refresh();
+        // with index.mapping.date.round_ceil disabled, lte("2010-01-06") means midnight and excludes doc 2
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05").lte("2010-01-06")).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05").lt("2010-01-06")).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test @TestLogging("action.search.type:TRACE,action.admin.indices.refresh:TRACE")
+ public void simpleDateMathTests() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()).execute().actionGet();
+ client().prepareIndex("test", "type1", "1").setSource("field", "2010-01-05T02:00").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "2010-01-06T02:00").execute().actionGet();
+ ensureGreen();
+ refresh();
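+        // date math: "2010-01-03||+2d" resolves to 2010-01-05 and "2010-01-04||+2d" to 2010-01-06,
+        // so with the default upper-bound rounding both documents fall inside the range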
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d")).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryString("field:[2010-01-03||+2d TO 2010-01-04||+2d]")).execute().actionGet();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test
+ public void localDependentDateTests() throws Exception {
+ prepareCreate("test")
+ .addMapping("type1",
+ jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("date_field")
+ .field("type", "date")
+ .field("format", "E, d MMM yyyy HH:mm:ss Z")
+ .field("locale", "de")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+ ensureGreen();
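+        // German abbreviations: "Mi" = Wednesday, "Do" = Thursday, "Fr" = Friday, "Dez" = December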
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", "" + i).setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800").execute().actionGet();
+ client().prepareIndex("test", "type1", "" + (10 + i)).setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800").execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
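+        // the first range ends at "Do, 07 Dez 2000 00:00", excluding the ten documents stamped
+        // 02:55 that day; widening the bound to "Fr, 08 Dez" picks them up for 20 hits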
+ for (int i = 0; i < 10; i++) {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Do, 07 Dez 2000 00:00:00 -0800"))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 10l);
+
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Fr, 08 Dez 2000 00:00:00 -0800"))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 20l);
+
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java
new file mode 100644
index 0000000..265179d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java
@@ -0,0 +1,1533 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.apache.lucene.util._TestUtil;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHitField;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+
+/**
+ *
+ */
+public class SimpleSortTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", 3)
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
+ @Test
+ public void testTrackScores() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ index("test", "type1", jsonBuilder().startObject()
+ .field("id", "1")
+ .field("svalue", "aaa")
+ .field("ivalue", 100)
+ .field("dvalue", 0.1)
+ .endObject());
+ index("test", "type1", jsonBuilder().startObject()
+ .field("id", "2")
+ .field("svalue", "bbb")
+ .field("ivalue", 200)
+ .field("dvalue", 0.2)
+ .endObject());
+ refresh();
+
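+        // sorting on a field disables scoring unless trackScores is set, so scores come back as NaN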
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("svalue", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getMaxScore(), equalTo(Float.NaN));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.getScore(), equalTo(Float.NaN));
+ }
+
+ // now check with score tracking
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("svalue", SortOrder.ASC)
+ .setTrackScores(true)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getMaxScore(), not(equalTo(Float.NaN)));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.getScore(), not(equalTo(Float.NaN)));
+ }
+ }
+
+ public void testRandomSorting() throws ElasticsearchException, IOException, InterruptedException, ExecutionException {
+ int numberOfShards = between(1, 10);
+ Random random = getRandom();
+ prepareCreate("test")
+ .setSettings(ImmutableSettings.builder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", 0))
+ .addMapping("type",
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("sparse_bytes")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .startObject("dense_bytes")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ ensureGreen();
+
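+        // TreeMaps keyed by BytesRef keep the random values in index sort order, so iterating
+        // them yields exactly the doc ids the sorted searches are expected to return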
+ TreeMap<BytesRef, String> sparseBytes = new TreeMap<BytesRef, String>();
+ TreeMap<BytesRef, String> denseBytes = new TreeMap<BytesRef, String>();
+ int numDocs = atLeast(200);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ String docId = Integer.toString(i);
+ BytesRef ref = null;
+ do {
+ ref = new BytesRef(_TestUtil.randomRealisticUnicodeString(random));
+ } while (denseBytes.containsKey(ref));
+ denseBytes.put(ref, docId);
+ XContentBuilder src = jsonBuilder().startObject().field("dense_bytes", ref.utf8ToString());
+ if (rarely()) {
+ src.field("sparse_bytes", ref.utf8ToString());
+ sparseBytes.put(ref, docId);
+ }
+ src.endObject();
+ builders[i] = client().prepareIndex("test", "type", docId).setSource(src);
+ }
+ indexRandom(true, builders);
+ {
+ int size = between(1, denseBytes.size());
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).setSize(size)
+ .addSort("dense_bytes", SortOrder.ASC).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo((long) numDocs));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ Set<Entry<BytesRef, String>> entrySet = denseBytes.entrySet();
+ Iterator<Entry<BytesRef, String>> iterator = entrySet.iterator();
+ for (int i = 0; i < size; i++) {
+ assertThat(iterator.hasNext(), equalTo(true));
+ Entry<BytesRef, String> next = iterator.next();
+ assertThat("pos: " + i, searchResponse.getHits().getAt(i).id(), equalTo(next.getValue()));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(next.getKey().utf8ToString()));
+ }
+ }
+ if (!sparseBytes.isEmpty()) {
+ int size = between(1, sparseBytes.size());
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .setPostFilter(FilterBuilders.existsFilter("sparse_bytes")).setSize(size).addSort("sparse_bytes", SortOrder.ASC).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo((long) sparseBytes.size()));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ Set<Entry<BytesRef, String>> entrySet = sparseBytes.entrySet();
+ Iterator<Entry<BytesRef, String>> iterator = entrySet.iterator();
+ for (int i = 0; i < size; i++) {
+ assertThat(iterator.hasNext(), equalTo(true));
+ Entry<BytesRef, String> next = iterator.next();
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(next.getValue()));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(next.getKey().utf8ToString()));
+ }
+ }
+ }
+
+
+ @Test
+ public void test3078() {
+ createIndex("test");
+ ensureGreen();
+
+ for (int i = 1; i < 101; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", Integer.toString(i)).execute().actionGet();
+ }
+ refresh();
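+        // "field" holds string values, so the sort is lexicographic: "1" < "10" < "100"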
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+
+ // reindex and refresh
+ client().prepareIndex("test", "type", Integer.toString(1)).setSource("field", Integer.toString(1)).execute().actionGet();
+ refresh();
+
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+
+ // reindex - no refresh
+ client().prepareIndex("test", "type", Integer.toString(1)).setSource("field", Integer.toString(1)).execute().actionGet();
+
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+
+ // optimize
+ optimize();
+ refresh();
+
+ client().prepareIndex("test", "type", Integer.toString(1)).setSource("field", Integer.toString(1)).execute().actionGet();
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+
+ refresh();
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+ }
+
+ @Test
+ public void testScoreSortDirection() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1)).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type", "1").setSource("field", 2).execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", 1).execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", 0).execute().actionGet();
+
+ refresh();
+
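+        // the script scores each doc by its "field" value, so doc "1" (field = 2) ranks first;
+        // an explicit sort on _score DESC must yield the same order as the default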
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(customScoreQuery(matchAllQuery()).script("_source.field")).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), Matchers.lessThan(searchResponse.getHits().getAt(0).score()));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(2).score(), Matchers.lessThan(searchResponse.getHits().getAt(1).score()));
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+
+ searchResponse = client().prepareSearch("test").setQuery(customScoreQuery(matchAllQuery()).script("_source.field")).addSort("_score", SortOrder.DESC).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), Matchers.lessThan(searchResponse.getHits().getAt(0).score()));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(2).score(), Matchers.lessThan(searchResponse.getHits().getAt(1).score()));
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+
+ searchResponse = client().prepareSearch("test").setQuery(customScoreQuery(matchAllQuery()).script("_source.field")).addSort("_score", SortOrder.DESC).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ }
+
+
+ @Test
+ public void testScoreSortDirection_withFunctionScore() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1)).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type", "1").setSource("field", 2).execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", 1).execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", 0).execute().actionGet();
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), scriptFunction("_source.field"))).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), Matchers.lessThan(searchResponse.getHits().getAt(0).score()));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(2).score(), Matchers.lessThan(searchResponse.getHits().getAt(1).score()));
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+
+ searchResponse = client().prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), scriptFunction("_source.field"))).addSort("_score", SortOrder.DESC).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), Matchers.lessThan(searchResponse.getHits().getAt(0).score()));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(2).score(), Matchers.lessThan(searchResponse.getHits().getAt(1).score()));
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+
+ searchResponse = client().prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), scriptFunction("_source.field"))).addSort("_score", SortOrder.DESC).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ }
+
+ @Test
+ public void testIssue2986() {
+ prepareCreate("test").setSettings(indexSettings()).execute().actionGet();
+
+ client().prepareIndex("test", "post", "1").setSource("{\"field1\":\"value1\"}").execute().actionGet();
+ client().prepareIndex("test", "post", "2").setSource("{\"field1\":\"value2\"}").execute().actionGet();
+ client().prepareIndex("test", "post", "3").setSource("{\"field1\":\"value3\"}").execute().actionGet();
+ refresh();
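+        // with trackScores(true), a field sort must still compute real scores, never NaN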
+ SearchResponse result = client().prepareSearch("test").setQuery(matchAllQuery()).setTrackScores(true).addSort("field1", SortOrder.ASC).execute().actionGet();
+
+ for (SearchHit hit : result.getHits()) {
+ assertFalse(Float.isNaN(hit.getScore()));
+ }
+ }
+
+ @Test
+ public void testIssue2991() {
+ for (int i = 1; i < 4; i++) {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", i).put("index.number_of_replicas", 0)).execute().actionGet();
+ ensureGreen();
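+            // index one document at a time with refreshes in between, spreading the tag values
+            // across several segments before sorting on them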
+ client().prepareIndex("test", "type", "1").setSource("tag", "alpha").execute().actionGet();
+ refresh();
+
+ client().prepareIndex("test", "type", "3").setSource("tag", "gamma").execute().actionGet();
+ refresh();
+
+ client().prepareIndex("test", "type", "4").setSource("tag", "delta").execute().actionGet();
+
+ refresh();
+ client().prepareIndex("test", "type", "2").setSource("tag", "beta").execute().actionGet();
+
+ refresh();
+ SearchResponse resp = client().prepareSearch("test").setSize(2).setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("tag").order(SortOrder.ASC)).execute().actionGet();
+ assertHitCount(resp, 4);
+ assertThat(resp.getHits().hits().length, equalTo(2));
+ assertFirstHit(resp, hasId("1"));
+ assertSecondHit(resp, hasId("2"));
+
+ resp = client().prepareSearch("test").setSize(2).setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("tag").order(SortOrder.DESC)).execute().actionGet();
+ assertHitCount(resp, 4);
+ assertThat(resp.getHits().hits().length, equalTo(2));
+ assertFirstHit(resp, hasId("3"));
+ assertSecondHit(resp, hasId("4"));
+ }
+ }
+
+ @Test
+ public void testSimpleSorts() throws Exception {
+ final int numberOfShards = between(1, 10);
+ Random random = getRandom();
+ prepareCreate("test")
+ .setSettings(ImmutableSettings.builder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", 0))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("str_value").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("boolean_value").field("type", "boolean").endObject()
+ .startObject("byte_value").field("type", "byte").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("short_value").field("type", "short").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("integer_value").field("type", "integer").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("long_value").field("type", "long").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("float_value").field("type", "float").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("double_value").field("type", "double").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ ensureGreen();
+ List<IndexRequestBuilder> builders = new ArrayList<IndexRequestBuilder>();
+ for (int i = 0; i < 10; i++) {
+ IndexRequestBuilder builder = client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("str_value", new String(new char[]{(char) (97 + i), (char) (97 + i)}))
+ .field("boolean_value", true)
+ .field("byte_value", i)
+ .field("short_value", i)
+ .field("integer_value", i)
+ .field("long_value", i)
+ .field("float_value", 0.1 * i)
+ .field("double_value", 0.1 * i)
+ .endObject());
+ builders.add(builder);
+ }
+ Collections.shuffle(builders, random);
+ for (IndexRequestBuilder builder : builders) {
+ builder.execute().actionGet();
+ if (random.nextBoolean()) {
+ if (random.nextInt(5) != 0) {
+ refresh();
+ } else {
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ }
+
+ }
+ refresh();
+
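+        // docs 0-9 were indexed in a random order with random refreshes/flushes in between;
+        // each sort below must nevertheless return them in their natural order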
+ // STRING
+ int size = 1 + random.nextInt(10);
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("str_value", SortOrder.ASC)
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[]{(char) (97 + i), (char) (97 + i)})));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("str_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[]{(char) (97 + (9 - i)), (char) (97 + (9 - i))})));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+
+ // STRING script
+ size = 1 + random.nextInt(10);
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort(new ScriptSortBuilder("doc['str_value'].value", "string"))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[]{(char) (97 + i), (char) (97 + i)})));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("str_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[]{(char) (97 + (9 - i)), (char) (97 + (9 - i))})));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // BYTE
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("byte_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).byteValue(), equalTo((byte) i));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("byte_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).byteValue(), equalTo((byte) (9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // SHORT
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("short_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).shortValue(), equalTo((short) i));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("short_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).shortValue(), equalTo((short) (9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // INTEGER
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("integer_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).intValue(), equalTo(i));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("integer_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).intValue(), equalTo((9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // LONG
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("long_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).longValue(), equalTo((long) i));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("long_value", SortOrder.DESC)
+ .execute().actionGet();
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).longValue(), equalTo((long) (9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // FLOAT
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("float_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("float_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // DOUBLE
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("double_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("double_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d));
+ }
+
+ assertNoFailures(searchResponse);
+ }
+
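+ // regression test for a sort-script parse issue (the method name suggests GitHub issue #2920):
+ // a script sort whose script is just '' (written below as unicode escapes) must not cause shard failures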
+ @Test
+ public void test2920() throws IOException {
+ assertAcked(prepareCreate("test").addMapping("test",
+ jsonBuilder().startObject().startObject("test").startObject("properties")
+ .startObject("value").field("type", "string").endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "test", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("value", "" + i).endObject()).execute().actionGet();
+ }
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.scriptSort("\u0027\u0027", "string")).setSize(10)
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ }
+
+ @Test
+ public void testSortMinValueScript() throws IOException {
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("lvalue").field("type", "long").endObject()
+ .startObject("dvalue").field("type", "double").endObject()
+ .startObject("svalue").field("type", "string").endObject()
+ .startObject("gvalue").field("type", "geo_point").endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").setSettings(indexSettings()).addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
+
+ for (int i = 0; i < 10; i++) {
+ IndexRequestBuilder req = client().prepareIndex("test", "type1", "" + i).setSource(jsonBuilder().startObject()
+ .field("ord", i)
+ .field("svalue", new String[]{"" + i, "" + (i + 1), "" + (i + 2)})
+ .field("lvalue", new long[]{i, i + 1, i + 2})
+ .field("dvalue", new double[]{i, i + 1, i + 2})
+ .startObject("gvalue")
+ .startObject("location")
+ .field("lat", (double) i + 1)
+ .field("lon", (double) i)
+ .endObject()
+ .endObject()
+ .endObject());
+ req.execute().actionGet();
+ }
+
+ for (int i = 10; i < 20; i++) { // add some docs that don't have values in those fields
+ client().prepareIndex("test", "type1", "" + i).setSource(jsonBuilder().startObject()
+ .field("ord", i)
+ .endObject()).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
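+ // the script fields below compute the per-document minimum of a multi-valued field; they rely on the
+ // default script language of this release (assumed to be MVEL here)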
+ // test the long values
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("min", "var retval = Long.MAX_VALUE; for (v : doc['lvalue'].values){ retval = Math.min(v, retval);} return retval;")
+ .addSort("ord", SortOrder.ASC).setSize(10)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Long) searchResponse.getHits().getAt(i).field("min").value(), equalTo((long) i));
+ }
+ // test the double values
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("min", "var retval = Double.MAX_VALUE; for (v : doc['dvalue'].values){ retval = Math.min(v, retval);} return retval;")
+ .addSort("ord", SortOrder.ASC).setSize(10)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Double) searchResponse.getHits().getAt(i).field("min").value(), equalTo((double) i));
+ }
+
+ // test the string values
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("min", "var retval = Integer.MAX_VALUE; for (v : doc['svalue'].values){ retval = Math.min(Integer.parseInt(v), retval);} return retval;")
+ .addSort("ord", SortOrder.ASC).setSize(10)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Integer) searchResponse.getHits().getAt(i).field("min").value(), equalTo(i));
+ }
+
+ // test the geopoint values
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("min", "var retval = Double.MAX_VALUE; for (v : doc['gvalue'].values){ retval = Math.min(v.lon, retval);} return retval;")
+ .addSort("ord", SortOrder.ASC).setSize(10)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Double) searchResponse.getHits().getAt(i).field("min").value(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void testDocumentsWithNullValue() throws Exception {
+ // TODO: sort shouldn't fail when sort field is mapped dynamically
+ // We have to specify the mapping explicitly because by the time the search is performed the dynamic mapping
+ // might not have propagated to all nodes yet, and the sort operation fails when the sort field is not defined
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("svalue").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject().string();
+ prepareCreate("test").setSettings(indexSettings()).addMapping("type1", mapping).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("svalue", "aaa")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("id", "2")
+ .nullField("svalue")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("id", "3")
+ .field("svalue", "bbb")
+ .endObject()).execute().actionGet();
+
+ flush();
+ refresh();
+
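+ // with the default missing handling, docs with a null sort value sort after all concrete values in both orders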
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("id", "doc['id'].value")
+ .addSort("svalue", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("1"));
+ assertThat((String) searchResponse.getHits().getAt(1).field("id").value(), equalTo("3"));
+ assertThat((String) searchResponse.getHits().getAt(2).field("id").value(), equalTo("2"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("id", "doc['id'].values[0]")
+ .addSort("svalue", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("1"));
+ assertThat((String) searchResponse.getHits().getAt(1).field("id").value(), equalTo("3"));
+ assertThat((String) searchResponse.getHits().getAt(2).field("id").value(), equalTo("2"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("id", "doc['id'].value")
+ .addSort("svalue", SortOrder.DESC)
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("3"));
+ assertThat((String) searchResponse.getHits().getAt(1).field("id").value(), equalTo("1"));
+ assertThat((String) searchResponse.getHits().getAt(2).field("id").value(), equalTo("2"));
+
+ // a query that matches only docs whose sort field is null
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("id", "2"))
+ .addScriptField("id", "doc['id'].value")
+ .addSort("svalue", SortOrder.DESC)
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("2"));
+ }
+
+ @Test
+ public void testSortMissingNumbers() throws Exception {
+ prepareCreate("test").addMapping("type1",
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("i_value")
+ .field("type", "integer")
+ .startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject()
+ .endObject()
+ .startObject("d_value")
+ .field("type", "float")
+ .startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("i_value", -1)
+ .field("d_value", -1.1)
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("id", "2")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("i_value", 2)
+ .field("d_value", 2.2)
+ .endObject()).execute().actionGet();
+
+ flush();
+ refresh();
+
+ logger.info("--> sort with no missing (same as missing _last)");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("2"));
+
+ logger.info("--> sort with missing _last");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_last"))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("2"));
+
+ logger.info("--> sort with missing _first");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_first"))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("3"));
+ }
+
+ @Test
+ public void testSortMissingStrings() throws ElasticsearchException, IOException {
+ prepareCreate("test").addMapping("type1",
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("value")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("value", "a")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("id", "2")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("value", "c")
+ .endObject()).execute().actionGet();
+
+ flush();
+ refresh();
+
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+
+ logger.info("--> sort with no missing (same as missing _last)");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC))
+ .execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("2"));
+
+ logger.info("--> sort with missing _last");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_last"))
+ .execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("2"));
+
+ logger.info("--> sort with missing _first");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_first"))
+ .execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("3"));
+
+ logger.info("--> sort with missing b");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("b"))
+ .execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("3"));
+ }
+
+ @Test
+ public void testIgnoreUnmapped() throws Exception {
+ createIndex("test");
+ ensureYellow();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("i_value", -1)
+ .field("d_value", -1.1)
+ .endObject()).execute().actionGet();
+
+ logger.info("--> sort with an unmapped field, verify it fails");
+ try {
+ SearchResponse result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("kkk"))
+ .execute().actionGet();
+ assertThat("Expected exception but returned with", result, nullValue());
+ } catch (SearchPhaseExecutionException e) {
+ // we check that it's a parse failure rather than a different shard failure
+ for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("Parse Failure [No mapping found for [kkk] in order to sort on]"));
+ }
+ }
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("kkk").ignoreUnmapped(true))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ }
+
+ @Test
+ public void testSortMVField() throws Exception {
+ prepareCreate("test")
+ .setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("long_values").field("type", "long").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("int_values").field("type", "integer").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("short_values").field("type", "short").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("byte_values").field("type", "byte").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("float_values").field("type", "float").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("double_values").field("type", "double").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("string_values").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", Integer.toString(1)).setSource(jsonBuilder().startObject()
+ .array("long_values", 1l, 5l, 10l, 8l)
+ .array("int_values", 1, 5, 10, 8)
+ .array("short_values", 1, 5, 10, 8)
+ .array("byte_values", 1, 5, 10, 8)
+ .array("float_values", 1f, 5f, 10f, 8f)
+ .array("double_values", 1d, 5d, 10d, 8d)
+ .array("string_values", "01", "05", "10", "08")
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", Integer.toString(2)).setSource(jsonBuilder().startObject()
+ .array("long_values", 11l, 15l, 20l, 7l)
+ .array("int_values", 11, 15, 20, 7)
+ .array("short_values", 11, 15, 20, 7)
+ .array("byte_values", 11, 15, 20, 7)
+ .array("float_values", 11f, 15f, 20f, 7f)
+ .array("double_values", 11d, 15d, 20d, 7d)
+ .array("string_values", "11", "15", "20", "07")
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", Integer.toString(3)).setSource(jsonBuilder().startObject()
+ .array("long_values", 2l, 1l, 3l, -4l)
+ .array("int_values", 2, 1, 3, -4)
+ .array("short_values", 2, 1, 3, -4)
+ .array("byte_values", 2, 1, 3, -4)
+ .array("float_values", 2f, 1f, 3f, -4f)
+ .array("double_values", 2d, 1d, 3d, -4d)
+ .array("string_values", "02", "01", "03", "!4")
+ .endObject()).execute().actionGet();
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("long_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).longValue(), equalTo(-4l));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).longValue(), equalTo(1l));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).longValue(), equalTo(7l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("long_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).longValue(), equalTo(20l));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).longValue(), equalTo(10l));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).longValue(), equalTo(3l));
+
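+ // sortMode("sum") ranks documents by the sum of their values: doc 2 -> 53, doc 1 -> 24, doc 3 -> 2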
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode("sum"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).longValue(), equalTo(53l));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).longValue(), equalTo(24l));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).longValue(), equalTo(2l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("int_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(-4));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(1));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(7));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("int_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(20));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(10));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(3));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("short_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(-4));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(1));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(7));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("short_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(20));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(10));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(3));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("byte_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(-4));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(1));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(7));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("byte_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(20));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(10));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(3));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("float_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).floatValue(), equalTo(-4f));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).floatValue(), equalTo(1f));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).floatValue(), equalTo(7f));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("float_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).floatValue(), equalTo(20f));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).floatValue(), equalTo(10f));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).floatValue(), equalTo(3f));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("double_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), equalTo(-4d));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(1d));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), equalTo(7d));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("double_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), equalTo(20d));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(10d));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), equalTo(3d));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("string_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("!4"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("01"));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(2).sortValues()[0]).string(), equalTo("07"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("20"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("10"));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Text) searchResponse.getHits().getAt(2).sortValues()[0]).string(), equalTo("03"));
+ }
+
+ @Test
+ public void testSortOnRareField() throws ElasticsearchException, IOException {
+ prepareCreate("test")
+ .setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("string_values").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type1", Integer.toString(1)).setSource(jsonBuilder().startObject()
+ .array("string_values", "01", "05", "10", "08")
+ .endObject()).execute().actionGet();
+
+
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(3)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("10"));
+
+ client().prepareIndex("test", "type1", Integer.toString(2)).setSource(jsonBuilder().startObject()
+ .array("string_values", "11", "15", "20", "07")
+ .endObject()).execute().actionGet();
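+ // pad the index with docs that lack the sort field, so only a small fraction of docs
+ // carry string_values (the "rare field" this test is about)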
+ for (int i = 0; i < 15; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(300 + i)).setSource(jsonBuilder().startObject()
+ .array("some_other_field", "foobar")
+ .endObject()).execute().actionGet();
+ }
+ refresh();
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(2)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("20"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("10"));
+
+
+ client().prepareIndex("test", "type1", Integer.toString(3)).setSource(jsonBuilder().startObject()
+ .array("string_values", "02", "01", "03", "!4")
+ .endObject()).execute().actionGet();
+ for (int i = 0; i < 15; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(300 + i)).setSource(jsonBuilder().startObject()
+ .array("some_other_field", "foobar")
+ .endObject()).execute().actionGet();
+ }
+ refresh();
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(3)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("20"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("10"));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Text) searchResponse.getHits().getAt(2).sortValues()[0]).string(), equalTo("03"));
+
+ for (int i = 0; i < 15; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(300 + i)).setSource(jsonBuilder().startObject()
+ .array("some_other_field", "foobar")
+ .endObject()).execute().actionGet();
+ refresh();
+ }
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(3)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("20"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("10"));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Text) searchResponse.getHits().getAt(2).sortValues()[0]).string(), equalTo("03"));
+ }
+
+ @Test
+ public void testSortMetaField() throws Exception {
+ final boolean idDocValues = maybeDocValues();
+ final boolean timestampDocValues = maybeDocValues();
+ prepareCreate("test")
+ .addMapping("typ", XContentFactory.jsonBuilder().startObject().startObject("typ")
+ .startObject("_uid").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
+ .startObject("_id").field("index", !idDocValues || randomBoolean() ? "not_analyzed" : "no").startObject("fielddata").field("format", idDocValues ? "doc_values" : null).endObject().endObject()
+ .startObject("_timestamp").field("enabled", true).field("store", true).field("index", !timestampDocValues || randomBoolean() ? "not_analyzed" : "no").startObject("fielddata").field("format", timestampDocValues ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+ ensureGreen();
+ final int numDocs = atLeast(10);
+ IndexRequestBuilder[] indexReqs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; ++i) {
+ indexReqs[i] = client().prepareIndex("test", "typ", Integer.toString(i)).setTimestamp(Integer.toString(randomInt(1000))).setSource();
+ }
+ indexRandom(true, indexReqs);
+
+ SortOrder order = randomFrom(SortOrder.values());
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(randomIntBetween(1, numDocs + 5))
+ .addSort("_uid", order)
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ SearchHit[] hits = searchResponse.getHits().hits();
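+ // sentinel for the ordering check: an empty BytesRef is below every _uid for ASC,
+ // UnicodeUtil.BIG_TERM is above every _uid for DESC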
+ BytesRef previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM;
+ for (int i = 0; i < hits.length; ++i) {
+ final BytesRef uid = new BytesRef(Uid.createUid(hits[i].type(), hits[i].id()));
+ assertThat(previous, order == SortOrder.ASC ? lessThan(uid) : greaterThan(uid));
+ previous = uid;
+ }
+
+ /*searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(randomIntBetween(1, numDocs + 5))
+ .addSort("_id", order)
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ hits = searchResponse.getHits().hits();
+ previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM;
+ for (int i = 0; i < hits.length; ++i) {
+ final BytesRef id = new BytesRef(Uid.createUid(hits[i].type(), hits[i].id()));
+ assertThat(previous, order == SortOrder.ASC ? lessThan(id) : greaterThan(id));
+ previous = id;
+ }*/
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(randomIntBetween(1, numDocs + 5))
+ .addSort("_timestamp", order)
+ .addField("_timestamp")
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ hits = searchResponse.getHits().hits();
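+ // sentinel timestamps: 0 for ASC, Long.MAX_VALUE for DESC; equal neighbours are allowed
+ // because the random timestamps may collide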
+ Long previousTs = order == SortOrder.ASC ? 0 : Long.MAX_VALUE;
+ for (int i = 0; i < hits.length; ++i) {
+ SearchHitField timestampField = hits[i].getFields().get("_timestamp");
+ Long timestamp = timestampField.<Long>getValue();
+ assertThat(previousTs, order == SortOrder.ASC ? lessThanOrEqualTo(timestamp) : greaterThanOrEqualTo(timestamp));
+ previousTs = timestamp;
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/source/SourceFetchingTests.java b/src/test/java/org/elasticsearch/search/source/SourceFetchingTests.java
new file mode 100644
index 0000000..de30d6c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/source/SourceFetchingTests.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.source;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+public class SourceFetchingTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSourceDefaultBehavior() {
+ createIndex("test");
+ ensureGreen();
+
+ index("test", "type1", "1", "field", "value");
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test").get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+
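+ // asking for a specific stored field suppresses the automatic _source fetch unless _source is requested explicitly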
+ response = client().prepareSearch("test").addField("bla").get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+
+ response = client().prepareSearch("test").addField("_source").get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+
+ response = client().prepareSearch("test").addPartialField("test", "field", null).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+
+ }
+
+ @Test
+ public void testSourceFiltering() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value", "field2", "value2").get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test").setFetchSource(false).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+
+ response = client().prepareSearch("test").setFetchSource(true).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+
+ response = client().prepareSearch("test").setFetchSource("field1", null).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+ assertThat(response.getHits().getAt(0).getSource().size(), equalTo(1));
+ assertThat((String) response.getHits().getAt(0).getSource().get("field1"), equalTo("value"));
+
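+ // an include pattern that matches no fields still returns a non-null, but empty, filtered source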
+ response = client().prepareSearch("test").setFetchSource("hello", null).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+ assertThat(response.getHits().getAt(0).getSource().size(), equalTo(0));
+
+ response = client().prepareSearch("test").setFetchSource(new String[]{"*"}, new String[]{"field2"}).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+ assertThat(response.getHits().getAt(0).getSource().size(), equalTo(1));
+ assertThat((String) response.getHits().getAt(0).getSource().get("field1"), equalTo("value"));
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java b/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java
new file mode 100644
index 0000000..c4778d5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.stats;
+
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.search.stats.SearchStats.Stats;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class SearchStatsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
+ @Test
+ public void testSimpleStats() throws Exception {
+ // clear all stats first
+ client().admin().indices().prepareStats().clear().execute().actionGet();
+ createIndex("test1");
+ for (int i = 0; i < 500; i++) {
+ client().prepareIndex("test1", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ if (i == 10) {
+ refresh();
+ }
+ }
+ createIndex("test2");
+ for (int i = 0; i < 500; i++) {
+ client().prepareIndex("test2", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ if (i == 10) {
+ refresh();
+ }
+ }
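+ // shrink the cluster so there are no more nodes than assigned shards (presumably so the
+ // per-node stats assertions below see a predictable shard distribution)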
+ cluster().ensureAtMostNumNodes(numAssignedShards("test1", "test2"));
+ for (int i = 0; i < 200; i++) {
+ client().prepareSearch().setQuery(QueryBuilders.termQuery("field", "value")).setStats("group1", "group2").execute().actionGet();
+ }
+
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryCount(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryTimeInMillis(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getTotal().getFetchCount(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getTotal().getFetchTimeInMillis(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats(), nullValue());
+
+ indicesStats = client().admin().indices().prepareStats().setGroups("group1").execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats(), notNullValue());
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats().get("group1").getQueryCount(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats().get("group1").getQueryTimeInMillis(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats().get("group1").getFetchCount(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats().get("group1").getFetchTimeInMillis(), greaterThan(0l));
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet();
+ NodeStats[] nodes = nodeStats.getNodes();
+ Set<String> nodeIdsWithIndex = nodeIdsWithIndex("test1", "test2");
+ int num = 0;
+ for (NodeStats stat : nodes) {
+ Stats total = stat.getIndices().getSearch().getTotal();
+ if (nodeIdsWithIndex.contains(stat.getNode().getId())) {
+ assertThat(total.getQueryCount(), greaterThan(0l));
+ assertThat(total.getQueryTimeInMillis(), greaterThan(0l));
+ num++;
+ } else {
+ assertThat(total.getQueryCount(), equalTo(0l));
+ assertThat(total.getQueryTimeInMillis(), equalTo(0l));
+ }
+ }
+
+ assertThat(num, greaterThan(0));
+
+ }
+
+ private Set<String> nodeIdsWithIndex(String... indices) {
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
+ Set<String> nodes = new HashSet<String>();
+ for (ShardIterator shardIterator : allAssignedShardsGrouped) {
+ for (ShardRouting routing : shardIterator.asUnordered()) {
+ if (routing.active()) {
+ nodes.add(routing.currentNodeId());
+ }
+
+ }
+ }
+ return nodes;
+ }
+
+ @Test
+ public void testOpenContexts() {
+ createIndex("test1");
+ for (int i = 0; i < 50; i++) {
+ client().prepareIndex("test1", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0l));
+
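+ // a scan search keeps one search context open per shard until the scroll is consumed or its timeout expires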
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.SCAN)
+ .setQuery(matchAllQuery())
+ .setSize(5)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo((long)numAssignedShards("test1")));
+
+ // scroll, but with no timeout (so no context)
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).execute().actionGet();
+
+ indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0l));
+ }
+
+ protected int numAssignedShards(String... indices) {
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
+ return allAssignedShardsGrouped.size();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java
new file mode 100644
index 0000000..5e77faf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java
@@ -0,0 +1,1111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.google.common.collect.Lists;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
+import org.elasticsearch.action.admin.indices.segments.ShardSegments;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.mapper.MapperException;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
+import org.elasticsearch.percolator.PercolatorService;
+import org.elasticsearch.search.sort.FieldSortBuilder;
+import org.elasticsearch.search.suggest.completion.CompletionStats;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestionFuzzyBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest {
+
+ private final String INDEX = RandomStrings.randomAsciiOfLength(getRandom(), 10).toLowerCase(Locale.ROOT);
+ private final String TYPE = RandomStrings.randomAsciiOfLength(getRandom(), 10).toLowerCase(Locale.ROOT);
+ private final String FIELD = RandomStrings.randomAsciiOfLength(getRandom(), 10).toLowerCase(Locale.ROOT);
+ private final CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder();
+
+ @Test
+ public void testSimple() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+ String[][] input = {{"Foo Fighters"}, {"Foo Fighters"}, {"Foo Fighters"}, {"Foo Fighters"},
+ {"Generator", "Foo Fighters Generator"}, {"Learn to Fly", "Foo Fighters Learn to Fly"},
+ {"The Prodigy"}, {"The Prodigy"}, {"The Prodigy"}, {"Firestarter", "The Prodigy Firestarter"},
+ {"Turbonegro"}, {"Turbonegro"}, {"Get it on", "Turbonegro Get it on"}}; // work with frequencies
+ for (int i = 0; i < input.length; i++) {
+ client().prepareIndex(INDEX, TYPE, "" + i)
+ .setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(input[i]).endArray()
+ .endObject()
+ .endObject()
+ )
+ .execute().actionGet();
+ }
+
+ refresh();
+
+ assertSuggestionsNotInOrder("f", "Foo Fighters", "Firestarter", "Foo Fighters Generator", "Foo Fighters Learn to Fly");
+ assertSuggestionsNotInOrder("t", "The Prodigy", "Turbonegro", "Turbonegro Get it on", "The Prodigy Firestarter");
+ }
+
+ @Test
+ public void testSuggestFieldWithPercolateApi() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+ String[][] input = {{"Foo Fighters"}, {"Foo Fighters"}, {"Foo Fighters"}, {"Foo Fighters"},
+ {"Generator", "Foo Fighters Generator"}, {"Learn to Fly", "Foo Fighters Learn to Fly"},
+ {"The Prodigy"}, {"The Prodigy"}, {"The Prodigy"}, {"Firestarter", "The Prodigy Firestarter"},
+ {"Turbonegro"}, {"Turbonegro"}, {"Get it on", "Turbonegro Get it on"}}; // work with frequencies
+ for (int i = 0; i < input.length; i++) {
+ client().prepareIndex(INDEX, TYPE, "" + i)
+ .setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(input[i]).endArray()
+ .endObject()
+ .endObject()
+ )
+ .execute().actionGet();
+ }
+
+ client().prepareIndex(INDEX, PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+
+ refresh();
+
+ PercolateResponse response = client().preparePercolate().setIndices(INDEX).setDocumentType(TYPE)
+ .setGetRequest(Requests.getRequest(INDEX).type(TYPE).id("1"))
+ .execute().actionGet();
+        assertThat(response.getCount(), equalTo(1L));
+ }
+
+ @Test
+ public void testBasicPrefixSuggestion() throws Exception {
+ completionMappingBuilder.payloads(true);
+ createIndexAndMapping(completionMappingBuilder);
+ for (int i = 0; i < 2; i++) {
+ createData(i == 0);
+ assertSuggestions("f", "Firestarter - The Prodigy", "Foo Fighters", "Generator - Foo Fighters", "Learn to Fly - Foo Fighters");
+ assertSuggestions("ge", "Generator - Foo Fighters", "Get it on - Turbonegro");
+ assertSuggestions("ge", "Generator - Foo Fighters", "Get it on - Turbonegro");
+ assertSuggestions("t", "The Prodigy", "Firestarter - The Prodigy", "Get it on - Turbonegro", "Turbonegro");
+ }
+ }
+
+ @Test
+ public void testThatWeightsAreWorking() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ List<String> similarNames = Lists.newArrayList("the", "The Prodigy", "The Verve", "The the");
+        // the weight is 1000 divided by the string length, so the results are easy to check
+ for (String similarName : similarNames) {
+ client().prepareIndex(INDEX, TYPE, similarName).setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(similarName).endArray()
+ .field("weight", 1000 / similarName.length())
+ .endObject().endObject()
+ ).get();
+ }
+
+ refresh();
+
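+        // 1000 / length: "the" -> 333, "The the" -> 142, "The Verve" -> 111, "The Prodigy" -> 90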
+ assertSuggestions("the", "the", "The the", "The Verve", "The Prodigy");
+ }
+
+ @Test
+ public void testThatWeightMustBeAnInteger() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ try {
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("sth").endArray()
+ .field("weight", 2.5)
+ .endObject().endObject()
+ ).get();
+ fail("Indexing with a float weight was successful, but should not be");
+ } catch (MapperParsingException e) {
+ assertThat(ExceptionsHelper.detailedMessage(e), containsString("2.5"));
+ }
+ }
+
+ @Test
+ public void testThatInputCanBeAStringInsteadOfAnArray() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .field("input", "Foo Fighters")
+ .field("output", "Boo Fighters")
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("f", "Boo Fighters");
+ }
+
+ @Test
+ public void testThatPayloadsAreArbitraryJsonObjects() throws Exception {
+ completionMappingBuilder.payloads(true);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("output", "Boo Fighters")
+ .startObject("payload").field("foo", "bar").startArray("test").value("spam").value("eggs").endArray().endObject()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("testSuggestions").field(FIELD).text("foo").size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, "testSuggestions", "Boo Fighters");
+ Suggest.Suggestion.Entry.Option option = suggestResponse.getSuggest().getSuggestion("testSuggestions").getEntries().get(0).getOptions().get(0);
+ assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class)));
+ CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option;
+ assertThat(prefixOption.getPayload(), is(notNullValue()));
+
+ // parse JSON
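+        // expected shape: { "foo": "bar", "test": ["spam", "eggs"] }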
+ Map<String, Object> jsonMap = prefixOption.getPayloadAsMap();
+ assertThat(jsonMap.size(), is(2));
+ assertThat(jsonMap.get("foo").toString(), is("bar"));
+ assertThat(jsonMap.get("test"), is(instanceOf(List.class)));
+ List<String> listValues = (List<String>) jsonMap.get("test");
+ assertThat(listValues, hasItems("spam", "eggs"));
+ }
+
+ @Test
+ public void testPayloadAsNumeric() throws Exception {
+ completionMappingBuilder.payloads(true);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("output", "Boo Fighters")
+ .field("payload", 1)
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("testSuggestions").field(FIELD).text("foo").size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, "testSuggestions", "Boo Fighters");
+ Suggest.Suggestion.Entry.Option option = suggestResponse.getSuggest().getSuggestion("testSuggestions").getEntries().get(0).getOptions().get(0);
+ assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class)));
+ CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option;
+ assertThat(prefixOption.getPayload(), is(notNullValue()));
+
+        assertThat(prefixOption.getPayloadAsLong(), equalTo(1L));
+ }
+
+ @Test
+ public void testPayloadAsString() throws Exception {
+ completionMappingBuilder.payloads(true);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("output", "Boo Fighters")
+ .field("payload", "test")
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("testSuggestions").field(FIELD).text("foo").size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, "testSuggestions", "Boo Fighters");
+ Suggest.Suggestion.Entry.Option option = suggestResponse.getSuggest().getSuggestion("testSuggestions").getEntries().get(0).getOptions().get(0);
+ assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class)));
+ CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option;
+ assertThat(prefixOption.getPayload(), is(notNullValue()));
+
+ assertThat(prefixOption.getPayloadAsString(), equalTo("test"));
+ }
+
+ @Test(expected = MapperException.class)
+ public void testThatExceptionIsThrownWhenPayloadsAreDisabledButInIndexRequest() throws Exception {
+ completionMappingBuilder.payloads(false);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("output", "Boo Fighters")
+ .startArray("payload").value("spam").value("eggs").endArray()
+ .endObject().endObject()
+ ).get();
+ }
+
+ @Test
+    public void testDisabledPreserveSeparators() throws Exception {
+ completionMappingBuilder.preserveSeparators(false);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("weight", 10)
+ .endObject().endObject()
+ ).get();
+
+ client().prepareIndex(INDEX, TYPE, "2").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foof").endArray()
+ .field("weight", 20)
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
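+        // with preserve_separators disabled the space in "Foo Fighters" is
+        // dropped at index time, so the prefix "foof" matches both entries;
+        // the higher weight ranks "Foof" first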
+ assertSuggestions("foof", "Foof", "Foo Fighters");
+ }
+
+ @Test
+    public void testEnabledPreserveSeparators() throws Exception {
+ completionMappingBuilder.preserveSeparators(true);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .endObject().endObject()
+ ).get();
+
+ client().prepareIndex(INDEX, TYPE, "2").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foof").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
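+        // with separators preserved, "foof" can no longer match "Foo Fighters"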
+ assertSuggestions("foof", "Foof");
+ }
+
+ @Test
+ public void testThatMultipleInputsAreSupported() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").value("Fu Fighters").endArray()
+ .field("output", "The incredible Foo Fighters")
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("foo", "The incredible Foo Fighters");
+ assertSuggestions("fu", "The incredible Foo Fighters");
+ }
+
+ @Test
+ public void testThatShortSyntaxIsWorking() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startArray(FIELD)
+ .value("The Prodigy Firestarter").value("Firestarter")
+ .endArray().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("t", "The Prodigy Firestarter");
+ assertSuggestions("f", "Firestarter");
+ }
+
+ @Test
+ public void testThatDisablingPositionIncrementsWorkForStopwords() throws Exception {
+        // we need an analyzer that removes stopwords; the simple analyzer does not, but classic filters out "the"
+ completionMappingBuilder.searchAnalyzer("classic").indexAnalyzer("classic").preservePositionIncrements(false);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("The Beatles").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("b", "The Beatles");
+ }
+
+ @Test
+ public void testThatSynonymsWork() throws Exception {
+ Settings.Builder settingsBuilder = settingsBuilder()
+ .put("analysis.analyzer.suggest_analyzer_synonyms.type", "custom")
+ .put("analysis.analyzer.suggest_analyzer_synonyms.tokenizer", "standard")
+ .putArray("analysis.analyzer.suggest_analyzer_synonyms.filter", "standard", "lowercase", "my_synonyms")
+ .put("analysis.filter.my_synonyms.type", "synonym")
+ .putArray("analysis.filter.my_synonyms.synonyms", "foo,renamed");
+ completionMappingBuilder.searchAnalyzer("suggest_analyzer_synonyms").indexAnalyzer("suggest_analyzer_synonyms");
+ createIndexAndMappingAndSettings(settingsBuilder, completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+        // the synonym filter maps "foo" to "renamed", so the prefix "r" must find "Foo Fighters"
+ assertSuggestions("r", "Foo Fighters");
+ }
+
+ @Test
+ public void testThatUpgradeToMultiFieldTypeWorks() throws Exception {
+ Settings.Builder settingsBuilder = createDefaultSettings();
+ final XContentBuilder mapping = jsonBuilder()
+ .startObject()
+ .startObject(TYPE)
+ .startObject("properties")
+ .startObject(FIELD)
+ .field("type", "string")
+ .field("path", "just_name") // The path can't be changed / upgraded
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ client().admin().indices().prepareCreate(INDEX).addMapping(TYPE, mapping).setSettings(settingsBuilder).get();
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "multi_field")
+ .field("path", "just_name")
+ .startObject("fields")
+ .startObject(FIELD).field("type", "string").endObject()
+ .startObject("suggest").field("type", "completion").field("index_analyzer", "simple").field("search_analyzer", "simple").endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ .endObject())
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), is(true));
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("suggs").field("suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, "suggs");
+
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ SuggestResponse afterReindexingResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("suggs").field("suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(afterReindexingResponse, "suggs", "Foo Fighters");
+ }
+
+ @Test
+ public void testThatUpgradeToMultiFieldsWorks() throws Exception {
+ Settings.Builder settingsBuilder = createDefaultSettings();
+ final XContentBuilder mapping = jsonBuilder()
+ .startObject()
+ .startObject(TYPE)
+ .startObject("properties")
+ .startObject(FIELD)
+ .field("type", "string")
+ .field("path", "just_name") // The path can't be changed / upgraded
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ client().admin().indices().prepareCreate(INDEX).addMapping(TYPE, mapping).setSettings(settingsBuilder).get();
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "string")
+ .field("path", "just_name") // Need to specify path again, to make sure that the `path` is known when this mapping is parsed and turned into DocumentMapper that we merge with.
+ .startObject("fields")
+ .startObject("suggest").field("type", "completion").field("index_analyzer", "simple").field("search_analyzer", "simple").endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ .endObject())
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), is(true));
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("suggs").field("suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, "suggs");
+
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ SuggestResponse afterReindexingResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("suggs").field("suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(afterReindexingResponse, "suggs", "Foo Fighters");
+ }
+
+ @Test
+ @LuceneTestCase.AwaitsFix(bugUrl = "path_type issue")
+    // If path_type is set to `just_name` and the multi field is later updated (for example, another
+    // sub-field is added) without specifying the path again, the path_type is ignored and full path names are generated.
+ public void testThatUpgradeToMultiFieldWorks_bug() throws Exception {
+ Settings.Builder settingsBuilder = createDefaultSettings();
+ final XContentBuilder mapping = jsonBuilder()
+ .startObject()
+ .startObject(TYPE)
+ .startObject("properties")
+ .startObject(FIELD)
+ .field("type", "multi_field")
+ .field("path", "just_name")
+ .startObject("fields")
+ .startObject(FIELD).field("type", "string").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ client().admin().indices().prepareCreate(INDEX).addMapping(TYPE, mapping).setSettings(settingsBuilder).get();
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject(FIELD).field("type", "string").endObject()
+ .startObject("suggest").field("type", "completion").field("index_analyzer", "simple").field("search_analyzer", "simple").endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ .endObject())
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), is(true));
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("suggs").field("suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, "suggs");
+
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ SuggestResponse afterReindexingResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("suggs").field("suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(afterReindexingResponse, "suggs", "Foo Fighters");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterWorks() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nirv").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nirw").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterSupportsEditDistances() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ // edit distance 1
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Norw").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ // edit distance 2
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Norw").size(10).setFuzziness(Fuzziness.TWO)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterSupportsTranspositions() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nriv").size(10).setFuzzyTranspositions(false).setFuzziness(Fuzziness.ONE)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nriv").size(10).setFuzzyTranspositions(true).setFuzziness(Fuzziness.ONE)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterSupportsMinPrefixLength() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nriva").size(10).setFuzzyMinLength(6)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nrivan").size(10).setFuzzyMinLength(6)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterSupportsNonPrefixLength() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nirw").size(10).setFuzzyPrefixLength(4)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("Nirvo").size(10).setFuzzyPrefixLength(4)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterIsUnicodeAware() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("ööööö").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+        // suggest with a character that requires unicode awareness
+ CompletionSuggestionFuzzyBuilder completionSuggestionBuilder =
+ new CompletionSuggestionFuzzyBuilder("foo").field(FIELD).text("öööи").size(10).setUnicodeAware(true);
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(completionSuggestionBuilder).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "ööööö");
+
+ // removing unicode awareness leads to no result
+ completionSuggestionBuilder.setUnicodeAware(false);
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(completionSuggestionBuilder).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+        // increasing the edit distance instead of enabling unicode awareness also works, since only a single character differs
+ completionSuggestionBuilder.setFuzziness(Fuzziness.TWO);
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(completionSuggestionBuilder).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "ööööö");
+ }
+
+ @Test
+ public void testThatStatsAreWorking() throws Exception {
+ String otherField = "testOtherField";
+
+ client().admin().indices().prepareDelete("_all").get();
+ client().admin().indices().prepareCreate(INDEX)
+ .setSettings(createDefaultSettings())
+ .get();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion").field("analyzer", "simple")
+ .endObject()
+ .startObject(otherField)
+ .field("type", "completion").field("analyzer", "simple")
+ .endObject()
+ .endObject().endObject().endObject())
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), is(true));
+
+ // Index two entities
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").field(otherField, "WHATEVER").endObject()).get();
+ client().prepareIndex(INDEX, TYPE, "2").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Bar Fighters").field(otherField, "WHATEVER2").endObject()).get();
+
+ // Get all stats
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).get();
+ CompletionStats completionStats = indicesStatsResponse.getIndex(INDEX).getPrimaries().completion;
+ assertThat(completionStats, notNullValue());
+ long totalSizeInBytes = completionStats.getSizeInBytes();
+ assertThat(totalSizeInBytes, is(greaterThan(0L)));
+
+ IndicesStatsResponse singleFieldStats = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).setCompletionFields(FIELD).get();
+ long singleFieldSizeInBytes = singleFieldStats.getIndex(INDEX).getPrimaries().completion.getFields().get(FIELD);
+ IndicesStatsResponse otherFieldStats = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).setCompletionFields(otherField).get();
+ long otherFieldSizeInBytes = otherFieldStats.getIndex(INDEX).getPrimaries().completion.getFields().get(otherField);
+ assertThat(singleFieldSizeInBytes + otherFieldSizeInBytes, is(totalSizeInBytes));
+
+ // regexes
+ IndicesStatsResponse regexFieldStats = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).setCompletionFields("*").get();
+ long regexSizeInBytes = regexFieldStats.getIndex(INDEX).getPrimaries().completion.getFields().get("*");
+ assertThat(regexSizeInBytes, is(totalSizeInBytes));
+ }
+
+ @Test
+ public void testThatSortingOnCompletionFieldReturnsUsefulException() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+ try {
+ client().prepareSearch(INDEX).setTypes(TYPE).addSort(new FieldSortBuilder(FIELD)).execute().actionGet();
+ fail("Expected an exception due to trying to sort on completion field, but did not happen");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status().getStatus(), is(400));
+ assertThat(e.getMessage(), containsString("Sorting not supported for field[" + FIELD + "]"));
+ }
+ }
+
+ @Test
+ public void testThatSuggestStopFilterWorks() throws Exception {
+ ImmutableSettings.Builder settingsBuilder = settingsBuilder()
+ .put("index.analysis.analyzer.stoptest.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.stoptest.filter", "standard", "suggest_stop_filter")
+ .put("index.analysis.filter.suggest_stop_filter.type", "stop")
+ .put("index.analysis.filter.suggest_stop_filter.remove_trailing", false);
+
+ CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder();
+ completionMappingBuilder.preserveSeparators(true).preservePositionIncrements(true);
+ completionMappingBuilder.searchAnalyzer("stoptest");
+ completionMappingBuilder.indexAnalyzer("simple");
+ createIndexAndMappingAndSettings(settingsBuilder, completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().field(FIELD, "Feed trolls").endObject()
+ ).get();
+
+ client().prepareIndex(INDEX, TYPE, "2").setSource(jsonBuilder()
+ .startObject().field(FIELD, "Feed the trolls").endObject()
+ ).get();
+
+ refresh();
+
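+        // remove_trailing=false keeps a partially typed trailing stop word
+        // ("feed the"), while a completed one ("feed the ") is still removed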
+ assertSuggestions("f", "Feed the trolls", "Feed trolls");
+ assertSuggestions("fe", "Feed the trolls", "Feed trolls");
+ assertSuggestions("fee", "Feed the trolls", "Feed trolls");
+ assertSuggestions("feed", "Feed the trolls", "Feed trolls");
+ assertSuggestions("feed t", "Feed the trolls", "Feed trolls");
+ assertSuggestions("feed the", "Feed the trolls");
+        // "the " is a complete stop word and gets removed at query time, leaving only "feed"
+ assertSuggestions("feed the ", "Feed the trolls", "Feed trolls");
+        // the stopword gets removed, but the position increment kicks in, which doesn't work for the prefix suggester
+ assertSuggestions("feed the t");
+ }
+
+ @Test(expected = MapperParsingException.class)
+ public void testThatIndexingInvalidFieldsInCompletionFieldResultsInException() throws Exception {
+ CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder();
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("FRIGGININVALID").value("Nirvana").endArray()
+ .endObject().endObject()).get();
+ }
+
+ public void assertSuggestions(String suggestion, String... suggestions) {
+        String suggestionName = RandomStrings.randomAsciiOfLength(getRandom(), 10);
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder(suggestionName).field(FIELD).text(suggestion).size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, suggestionName, suggestions);
+ }
+
+ public void assertSuggestionsNotInOrder(String suggestString, String... suggestions) {
+        String suggestionName = RandomStrings.randomAsciiOfLength(getRandom(), 10);
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder(suggestionName).field(FIELD).text(suggestString).size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, false, suggestionName, suggestions);
+ }
+
+ private void assertSuggestions(SuggestResponse suggestResponse, String name, String... suggestions) {
+ assertSuggestions(suggestResponse, true, name, suggestions);
+ }
+
+ private void assertSuggestions(SuggestResponse suggestResponse, boolean suggestionOrderStrict, String name, String... suggestions) {
+ assertNoFailures(suggestResponse);
+
+ List<String> suggestionNames = Lists.newArrayList();
+ for (Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> suggestion : Lists.newArrayList(suggestResponse.getSuggest().iterator())) {
+ suggestionNames.add(suggestion.getName());
+ }
+ String expectFieldInResponseMsg = String.format(Locale.ROOT, "Expected suggestion named %s in response, got %s", name, suggestionNames);
+ assertThat(expectFieldInResponseMsg, suggestResponse.getSuggest().getSuggestion(name), is(notNullValue()));
+
+ Suggest.Suggestion<Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>> suggestion = suggestResponse.getSuggest().getSuggestion(name);
+
+ List<String> suggestionList = getNames(suggestion.getEntries().get(0));
+ List<Suggest.Suggestion.Entry.Option> options = suggestion.getEntries().get(0).getOptions();
+
+        String assertMsg = String.format(Locale.ROOT, "Expected options list %s to have length %s, but was %s", suggestionList, suggestions.length, options.size());
+ assertThat(assertMsg, options.size(), is(suggestions.length));
+ if (suggestionOrderStrict) {
+ for (int i = 0; i < suggestions.length; i++) {
+ String errMsg = String.format(Locale.ROOT, "Expected elem %s in list %s to be [%s] score: %s", i, suggestionList, suggestions[i], options.get(i).getScore());
+ assertThat(errMsg, options.get(i).getText().toString(), is(suggestions[i]));
+ }
+ } else {
+ for (String expectedSuggestion : suggestions) {
+ String errMsg = String.format(Locale.ROOT, "Expected elem %s to be in list %s", expectedSuggestion, suggestionList);
+ assertThat(errMsg, suggestionList, hasItem(expectedSuggestion));
+ }
+ }
+ }
+
+ private List<String> getNames(Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> suggestEntry) {
+ List<String> names = Lists.newArrayList();
+ for (Suggest.Suggestion.Entry.Option entry : suggestEntry.getOptions()) {
+ names.add(entry.getText().string());
+ }
+ return names;
+ }
+
+ private void createIndexAndMappingAndSettings(Settings.Builder settingsBuilder, CompletionMappingBuilder completionMappingBuilder) throws IOException {
+ client().admin().indices().prepareCreate(INDEX)
+ .setSettings(settingsBuilder)
+ .get();
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .field("index_analyzer", completionMappingBuilder.indexAnalyzer)
+ .field("search_analyzer", completionMappingBuilder.searchAnalyzer)
+ .field("payloads", completionMappingBuilder.payloads)
+ .field("preserve_separators", completionMappingBuilder.preserveSeparators)
+ .field("preserve_position_increments", completionMappingBuilder.preservePositionIncrements)
+ .endObject()
+ .endObject().endObject()
+ .endObject())
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), is(true));
+ ensureYellow();
+ }
+
+ private void createIndexAndMapping(CompletionMappingBuilder completionMappingBuilder) throws IOException {
+ createIndexAndMappingAndSettings(createDefaultSettings(), completionMappingBuilder);
+ }
+
+ private ImmutableSettings.Builder createDefaultSettings() {
+ int randomShardNumber = between(1, 5);
+ int randomReplicaNumber = between(0, cluster().size() - 1);
+ return settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, randomShardNumber).put(SETTING_NUMBER_OF_REPLICAS, randomReplicaNumber);
+ }
+
+ private void createData(boolean optimize) throws IOException, InterruptedException, ExecutionException {
+ String[][] input = {{"Foo Fighters"}, {"Generator", "Foo Fighters Generator"}, {"Learn to Fly", "Foo Fighters Learn to Fly"}, {"The Prodigy"}, {"Firestarter", "The Prodigy Firestarter"}, {"Turbonegro"}, {"Get it on", "Turbonegro Get it on"}};
+ String[] surface = {"Foo Fighters", "Generator - Foo Fighters", "Learn to Fly - Foo Fighters", "The Prodigy", "Firestarter - The Prodigy", "Turbonegro", "Get it on - Turbonegro"};
+ int[] weight = {10, 9, 8, 12, 11, 6, 7};
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[input.length];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex(INDEX, TYPE, "" + i)
+ .setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(input[i]).endArray()
+ .field("output", surface[i])
+ .startObject("payload").field("id", i).endObject()
+ .field("weight", 1) // WE FORCEFULLY INDEX A BOGUS WEIGHT
+ .endObject()
+ .endObject()
+ );
+ }
+ indexRandom(false, builders);
+
+ for (int i = 0; i < builders.length; i++) { // add them again to make sure we deduplicate on the surface form
+ builders[i] = client().prepareIndex(INDEX, TYPE, "n" + i)
+ .setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(input[i]).endArray()
+ .field("output", surface[i])
+ .startObject("payload").field("id", i).endObject()
+ .field("weight", weight[i])
+ .endObject()
+ .endObject()
+ );
+ }
+ indexRandom(false, builders);
+
+ client().admin().indices().prepareRefresh(INDEX).execute().actionGet();
+ if (optimize) {
+ // make sure merging works just fine
+ client().admin().indices().prepareFlush(INDEX).execute().actionGet();
+ client().admin().indices().prepareOptimize(INDEX).execute().actionGet();
+ }
+ }
+
+ @Test // see #3555
+ public void testPrunedSegments() throws IOException {
+ createIndexAndMappingAndSettings(settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0), completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("The Beatles").endArray()
+ .endObject().endObject()
+ ).get();
+ client().prepareIndex(INDEX, TYPE, "2").setSource(jsonBuilder()
+ .startObject()
+ .field("somefield", "somevalue")
+ .endObject()
+ ).get(); // we have 2 docs in a segment...
+ OptimizeResponse actionGet = client().admin().indices().prepareOptimize().setFlush(true).setMaxNumSegments(1).execute().actionGet();
+ assertNoFailures(actionGet);
+ refresh();
+        // update the first doc and then merge; the resulting segment will have no value in FIELD
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject()
+ .field("somefield", "somevalue")
+ .endObject()
+ ).get();
+ actionGet = client().admin().indices().prepareOptimize().setFlush(true).setMaxNumSegments(1).execute().actionGet();
+ assertNoFailures(actionGet);
+ refresh();
+
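+        // after the merge the only live copy of doc 1 has no completion value,
+        // so the suggester must come back empty for "b"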
+ assertSuggestions("b");
+        assertThat(client().prepareCount(INDEX).get().getCount(), equalTo(2L));
+ for (IndexShardSegments seg : client().admin().indices().prepareSegments().get().getIndices().get(INDEX)) {
+ ShardSegments[] shards = seg.getShards();
+ for (ShardSegments shardSegments : shards) {
+                assertThat(shardSegments.getSegments().size(), equalTo(1));
+ }
+ }
+ }
+
+ @Test
+ public void testMaxFieldLength() throws IOException {
+ client().admin().indices().prepareCreate(INDEX).get();
+ int iters = atLeast(10);
+ for (int i = 0; i < iters; i++) {
+ int maxInputLen = between(3, 50);
+ String str = replaceReservedChars(randomRealisticUnicodeOfCodepointLengthBetween(maxInputLen + 1, atLeast(maxInputLen + 2)), (char) 0x01);
+ ElasticsearchAssertions.assertAcked(client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .field("max_input_length", maxInputLen)
+ // upgrade mapping each time
+ .field("analyzer", "keyword")
+ .endObject()
+ .endObject().endObject()
+ .endObject()));
+ ensureYellow();
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(str).endArray()
+ .field("output", "foobar")
+ .endObject().endObject()
+ ).setRefresh(true).get();
+ // need to flush and refresh, because we keep changing the same document
+ // we have to make sure that segments without any live documents are deleted
+ flushAndRefresh();
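+            // a prefix within max_input_length must match, while one that runs
+            // past the truncated input must not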
+ int prefixLen = CompletionFieldMapper.correctSubStringLen(str, between(1, maxInputLen - 1));
+ assertSuggestions(str.substring(0, prefixLen), "foobar");
+ if (maxInputLen + 1 < str.length()) {
+ int offset = Character.isHighSurrogate(str.charAt(maxInputLen - 1)) ? 2 : 1;
+ int correctSubStringLen = CompletionFieldMapper.correctSubStringLen(str, maxInputLen + offset);
+ String shortenedSuggestion = str.substring(0, correctSubStringLen);
+ assertSuggestions(shortenedSuggestion);
+ }
+ }
+ }
+
+ @Test
+ // see #3596
+ public void testVeryLongInput() throws IOException {
+ client().admin().indices().prepareCreate(INDEX).get();
+ ElasticsearchAssertions.assertAcked(client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .endObject()
+ .endObject().endObject()
+ .endObject()));
+ ensureYellow();
+ // can cause stack overflow without the default max_input_length
+ String longString = replaceReservedChars(randomRealisticUnicodeOfLength(atLeast(5000)), (char) 0x01);
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(longString).endArray()
+ .field("output", "foobar")
+ .endObject().endObject()
+ ).setRefresh(true).get();
+ }
+
+ // see #3648
+ @Test(expected = MapperParsingException.class)
+ public void testReservedChars() throws IOException {
+ client().admin().indices().prepareCreate(INDEX).get();
+ ElasticsearchAssertions.assertAcked(client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .endObject()
+ .endObject().endObject()
+ .endObject()));
+ ensureYellow();
+        // the zero byte is a reserved character for the completion field and must be rejected at index time
+ String string = "foo" + (char) 0x00 + "bar";
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(string).endArray()
+ .field("output", "foobar")
+ .endObject().endObject()
+ ).setRefresh(true).get();
+ }
+
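+    // the completion field reserves a few control characters internally;
+    // random unicode input is scrubbed of them so that only testReservedChars
+    // above triggers the reserved-character rejection on purpose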
+ private static String replaceReservedChars(String input, char replacement) {
+ char[] charArray = input.toCharArray();
+ for (int i = 0; i < charArray.length; i++) {
+ if (CompletionFieldMapper.isReservedChar(charArray[i])) {
+ charArray[i] = replacement;
+ }
+ }
+ return new String(charArray);
+ }
+
+ private static class CompletionMappingBuilder {
+ private String searchAnalyzer = "simple";
+ private String indexAnalyzer = "simple";
+ private Boolean payloads = getRandom().nextBoolean();
+ private Boolean preserveSeparators = getRandom().nextBoolean();
+ private Boolean preservePositionIncrements = getRandom().nextBoolean();
+
+ public CompletionMappingBuilder searchAnalyzer(String searchAnalyzer) {
+ this.searchAnalyzer = searchAnalyzer;
+ return this;
+ }
+ public CompletionMappingBuilder indexAnalyzer(String indexAnalyzer) {
+ this.indexAnalyzer = indexAnalyzer;
+ return this;
+ }
+ public CompletionMappingBuilder payloads(Boolean payloads) {
+ this.payloads = payloads;
+ return this;
+ }
+ public CompletionMappingBuilder preserveSeparators(Boolean preserveSeparators) {
+ this.preserveSeparators = preserveSeparators;
+ return this;
+ }
+ public CompletionMappingBuilder preservePositionIncrements(Boolean preservePositionIncrements) {
+ this.preservePositionIncrements = preservePositionIncrements;
+ return this;
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java b/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java
new file mode 100644
index 0000000..241e46c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.SimpleAnalyzer;
+import org.apache.lucene.analysis.synonym.SynonymFilter;
+import org.apache.lucene.analysis.synonym.SynonymMap;
+import org.apache.lucene.analysis.synonym.SynonymMap.Builder;
+import org.apache.lucene.analysis.tokenattributes.*;
+import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.elasticsearch.search.suggest.completion.CompletionTokenStream;
+import org.elasticsearch.search.suggest.completion.CompletionTokenStream.ByteTermAttribute;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class CompletionTokenStreamTest extends ElasticsearchTokenStreamTestCase {
+
+ final XAnalyzingSuggester suggester = new XAnalyzingSuggester(new SimpleAnalyzer(TEST_VERSION_CURRENT));
+
+ @Test
+ public void testSuggestTokenFilter() throws Exception {
+ TokenStream tokenStream = new MockTokenizer(new StringReader("mykeyword"), MockTokenizer.WHITESPACE, true);
+ BytesRef payload = new BytesRef("Surface keyword|friggin payload|10");
+ TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(tokenStream, payload, new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ }
+ }));
+ assertTokenStreamContents(suggestTokenStream, new String[] {"mykeyword"}, null, null, new String[] {"Surface keyword|friggin payload|10"}, new int[] { 1 }, null, null);
+ }
+
+ @Test
+ public void testSuggestTokenFilterWithSynonym() throws Exception {
+ Builder builder = new SynonymMap.Builder(true);
+ builder.add(new CharsRef("mykeyword"), new CharsRef("mysynonym"), true);
+
+ MockTokenizer tokenizer = new MockTokenizer(new StringReader("mykeyword"), MockTokenizer.WHITESPACE, true);
+ SynonymFilter filter = new SynonymFilter(tokenizer, builder.build(), true);
+
+ BytesRef payload = new BytesRef("Surface keyword|friggin payload|10");
+ TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(filter, payload, new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ }
+ }));
+ assertTokenStreamContents(suggestTokenStream, new String[] {"mysynonym", "mykeyword"}, null, null, new String[] {"Surface keyword|friggin payload|10", "Surface keyword|friggin payload|10"}, new int[] { 2, 0 }, null, null);
+ }
+
+ @Test
+ public void testValidNumberOfExpansions() throws IOException {
+ Builder builder = new SynonymMap.Builder(true);
+ for (int i = 0; i < 256; i++) {
+ builder.add(new CharsRef("" + (i+1)), new CharsRef("" + (1000 + (i+1))), true);
+ }
+ StringBuilder valueBuilder = new StringBuilder();
+ for (int i = 0 ; i < 8 ; i++) {
+ valueBuilder.append(i+1);
+ valueBuilder.append(" ");
+ }
+ MockTokenizer tokenizer = new MockTokenizer(new StringReader(valueBuilder.toString()), MockTokenizer.WHITESPACE, true);
+ SynonymFilter filter = new SynonymFilter(tokenizer, builder.build(), true);
+
+ TokenStream suggestTokenStream = new CompletionTokenStream(filter, new BytesRef("Surface keyword|friggin payload|10"), new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ Set<IntsRef> finiteStrings = suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ return finiteStrings;
+ }
+ });
+
+ suggestTokenStream.reset();
+ ByteTermAttribute attr = suggestTokenStream.addAttribute(ByteTermAttribute.class);
+ PositionIncrementAttribute posAttr = suggestTokenStream.addAttribute(PositionIncrementAttribute.class);
+ int maxPos = 0;
+ int count = 0;
+ while(suggestTokenStream.incrementToken()) {
+ count++;
+ assertNotNull(attr.getBytesRef());
+ assertTrue(attr.getBytesRef().length > 0);
+ maxPos += posAttr.getPositionIncrement();
+ }
+ suggestTokenStream.close();
+ assertEquals(count, 256);
+ assertEquals(count, maxPos);
+ }
+
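+    // 8 tokens with one synonym each expand to 2^8 = 256 finite strings, which
+    // is still within the expansion limit; the test below uses 9 tokens
+    // (2^9 = 512) and expects the limit to trip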
+ @Test(expected = IllegalArgumentException.class)
+ public void testInValidNumberOfExpansions() throws IOException {
+ Builder builder = new SynonymMap.Builder(true);
+ for (int i = 0; i < 256; i++) {
+ builder.add(new CharsRef("" + (i+1)), new CharsRef("" + (1000 + (i+1))), true);
+ }
+ StringBuilder valueBuilder = new StringBuilder();
+ for (int i = 0 ; i < 9 ; i++) { // 9 -> expands to 512
+ valueBuilder.append(i+1);
+ valueBuilder.append(" ");
+ }
+ MockTokenizer tokenizer = new MockTokenizer(new StringReader(valueBuilder.toString()), MockTokenizer.WHITESPACE, true);
+ SynonymFilter filter = new SynonymFilter(tokenizer, builder.build(), true);
+
+ TokenStream suggestTokenStream = new CompletionTokenStream(filter, new BytesRef("Surface keyword|friggin payload|10"), new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ Set<IntsRef> finiteStrings = suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ return finiteStrings;
+ }
+ });
+
+ suggestTokenStream.reset();
+ suggestTokenStream.incrementToken();
+ suggestTokenStream.close();
+ }
+
+ @Test
+ public void testSuggestTokenFilterProperlyDelegateInputStream() throws Exception {
+ TokenStream tokenStream = new MockTokenizer(new StringReader("mykeyword"), MockTokenizer.WHITESPACE, true);
+ BytesRef payload = new BytesRef("Surface keyword|friggin payload|10");
+ TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(tokenStream, payload, new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ }
+ }));
+ TermToBytesRefAttribute termAtt = suggestTokenStream.getAttribute(TermToBytesRefAttribute.class);
+ BytesRef ref = termAtt.getBytesRef();
+ assertNotNull(ref);
+ suggestTokenStream.reset();
+
+ while (suggestTokenStream.incrementToken()) {
+ termAtt.fillBytesRef();
+ assertThat(ref.utf8ToString(), equalTo("mykeyword"));
+ }
+ suggestTokenStream.end();
+ suggestTokenStream.close();
+ }
+
+
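+    /**
+     * Test helper that copies each token's payload into the type attribute so
+     * that assertTokenStreamContents can verify payloads alongside the terms.
+     */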
+ public final static class ByteTermAttrToCharTermAttrFilter extends TokenFilter {
+ private ByteTermAttribute byteAttr = addAttribute(ByteTermAttribute.class);
+ private PayloadAttribute payload = addAttribute(PayloadAttribute.class);
+ private TypeAttribute type = addAttribute(TypeAttribute.class);
+ private CharTermAttribute charTermAttribute = addAttribute(CharTermAttribute.class);
+ protected ByteTermAttrToCharTermAttrFilter(TokenStream input) {
+ super(input);
+ }
+
+ @Override
+ public boolean incrementToken() throws IOException {
+ if (input.incrementToken()) {
+ BytesRef bytesRef = byteAttr.getBytesRef();
+ // we move them over so we can assert them more easily in the tests
+ type.setType(payload.getPayload().utf8ToString());
+ return true;
+ }
+ return false;
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java b/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java
new file mode 100644
index 0000000..476fe3c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.util.CharsRef;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * A minimal suggester implementation used to verify that suggesters are pluggable.
+ */
+public class CustomSuggester extends Suggester<CustomSuggester.CustomSuggestionsContext> {
+
+
+ // This is a pretty dumb implementation which returns the original text + fieldName + custom config option + 12 or 123
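+    // e.g. text "foo", field "name", suffix "42" yields the options
+    // "foo-name-42-12" and "foo-name-42-123"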
+ @Override
+ public Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> innerExecute(String name, CustomSuggestionsContext suggestion, IndexReader indexReader, CharsRef spare) throws IOException {
+ // Get the suggestion context
+ String text = suggestion.getText().utf8ToString();
+
+ // create two suggestions with 12 and 123 appended
+ Suggest.Suggestion<Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>> response = new Suggest.Suggestion<Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>>(name, suggestion.getSize());
+
+ String firstSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "12");
+ Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry12 = new Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>(new StringText(firstSuggestion), 0, text.length() + 2);
+ response.addTerm(resultEntry12);
+
+ String secondSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "123");
+ Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry123 = new Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>(new StringText(secondSuggestion), 0, text.length() + 3);
+ response.addTerm(resultEntry123);
+
+ return response;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[] {"custom"};
+ }
+
+ @Override
+ public SuggestContextParser getContextParser() {
+ return new SuggestContextParser() {
+ @Override
+ public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService) throws IOException {
+ Map<String, Object> options = parser.map();
+ CustomSuggestionsContext suggestionContext = new CustomSuggestionsContext(CustomSuggester.this, options);
+ suggestionContext.setField((String) options.get("field"));
+ return suggestionContext;
+ }
+ };
+ }
+
+ public static class CustomSuggestionsContext extends SuggestionSearchContext.SuggestionContext {
+
+ public Map<String, Object> options;
+
+ public CustomSuggestionsContext(Suggester suggester, Map<String, Object> options) {
+ super(suggester);
+ this.options = options;
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java b/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java
new file mode 100644
index 0000000..a54421c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+/**
+ *
+ */
+public class CustomSuggesterPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test-plugin-custom-suggester";
+ }
+
+ @Override
+ public String description() {
+ return "Custom suggester to test pluggable implementation";
+ }
+
+ public void onModule(SuggestModule suggestModule) {
+ suggestModule.registerSuggester(CustomSuggester.class);
+ }
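+
+    // Minimal sketch of how this plugin is activated in CustomSuggesterSearchTests
+    // below; "plugin.types" is the node setting that names plugin classes to load:
+    //
+    //   Settings settings = ImmutableSettings.settingsBuilder()
+    //           .put("plugin.types", CustomSuggesterPlugin.class.getName())
+    //           .build();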
+
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchTests.java
new file mode 100644
index 0000000..ee78c1a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchTests.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Locale;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.SUITE, numNodes=1)
+public class CustomSuggesterSearchTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.settingsBuilder().put("plugin.types", CustomSuggesterPlugin.class.getName()).put(super.nodeSettings(nodeOrdinal)).build();
+ }
+
+ @Test
+ public void testThatCustomSuggestersCanBeRegisteredAndWork() throws Exception {
+ createIndex("test");
+ client().prepareIndex("test", "test", "1").setSource(jsonBuilder()
+ .startObject()
+ .field("name", "arbitrary content")
+ .endObject())
+ .setRefresh(true).execute().actionGet();
+ ensureYellow();
+
+ String randomText = randomAsciiOfLength(10);
+ String randomField = randomAsciiOfLength(10);
+ String randomSuffix = randomAsciiOfLength(10);
+ SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test").setTypes("test").setFrom(0).setSize(1);
+ XContentBuilder query = jsonBuilder().startObject()
+ .startObject("suggest")
+ .startObject("someName")
+ .field("text", randomText)
+ .startObject("custom")
+ .field("field", randomField)
+ .field("suffix", randomSuffix)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
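+        // For reference, the builder above produces a request body of this shape
+        // (field values are randomized per run):
+        //   {"suggest": {"someName": {"text": "...", "custom": {"field": "...", "suffix": "..."}}}}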
+ searchRequestBuilder.setExtraSource(query.bytes());
+
+ SearchResponse searchResponse = searchRequestBuilder.execute().actionGet();
+
+ List<Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> suggestions = Lists.newArrayList(searchResponse.getSuggest().getSuggestion("someName").iterator());
+ assertThat(suggestions, hasSize(2));
+ assertThat(suggestions.get(0).getText().string(), is(String.format(Locale.ROOT, "%s-%s-%s-12", randomText, randomField, randomSuffix)));
+ assertThat(suggestions.get(1).getText().string(), is(String.format(Locale.ROOT, "%s-%s-%s-123", randomText, randomField, randomSuffix)));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java
new file mode 100644
index 0000000..6a1fa25
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java
@@ -0,0 +1,977 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.suggest;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableList;
+import com.google.common.io.Resources;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.DirectCandidateGenerator;
+import org.elasticsearch.search.suggest.term.TermSuggestionBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.search.suggest.SuggestBuilder.phraseSuggestion;
+import static org.elasticsearch.search.suggest.SuggestBuilder.termSuggestion;
+import static org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.candidateGenerator;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for term and phrase suggestions. Many of these tests make many requests that vary only slightly from one another.
+ * Where possible these tests should declare the configuration for the first request, make the request, modify the configuration for the
+ * next request, make that request, modify again, request again, etc. This makes it very obvious what changes between requests.
+ */
+public class SuggestSearchTests extends ElasticsearchIntegrationTest {
+
+
+ @Test // see #3196
+ public void testSuggestAcrossMultipleIndices() throws IOException {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, between(1, 5),
+ SETTING_NUMBER_OF_REPLICAS, between(0, 1)).get();
+ ensureGreen();
+
+ index("test", "type1", "1", "text", "abcd");
+ index("test", "type1", "2", "text", "aacd");
+ index("test", "type1", "3", "text", "abbd");
+ index("test", "type1", "4", "text", "abcc");
+ refresh();
+
+ TermSuggestionBuilder termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("abcd")
+ .field("text");
+ logger.info("--> run suggestions with one index");
+ searchSuggest(client(), termSuggest);
+ prepareCreate("test_1").setSettings(
+ SETTING_NUMBER_OF_SHARDS, between(1, 5),
+ SETTING_NUMBER_OF_REPLICAS, between(0, 1)).get();
+ ensureGreen();
+
+ index("test_1", "type1", "1", "text", "ab cd");
+ index("test_1", "type1", "2", "text", "aa cd");
+ index("test_1", "type1", "3", "text", "ab bd");
+ index("test_1", "type1", "4", "text", "ab cc");
+ refresh();
+ termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("ab cd")
+ .minWordLength(1)
+ .field("text");
+ logger.info("--> run suggestions with two indices");
+ searchSuggest(client(), termSuggest);
+
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("text").field("type", "string").field("analyzer", "keyword").endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test_2").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, between(1, 5))
+ .put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))
+ ).addMapping("type1", mapping));
+ ensureGreen();
+
+ index("test_2", "type1", "1", "text", "ab cd");
+ index("test_2", "type1", "2", "text", "aa cd");
+ index("test_2", "type1", "3", "text", "ab bd");
+ index("test_2", "type1", "4", "text", "ab cc");
+ index("test_2", "type1", "1", "text", "abcd");
+ index("test_2", "type1", "2", "text", "aacd");
+ index("test_2", "type1", "3", "text", "abbd");
+ index("test_2", "type1", "4", "text", "abcc");
+ refresh();
+
+ termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("ab cd")
+ .minWordLength(1)
+ .field("text");
+ logger.info("--> run suggestions with three indices");
+ try {
+ searchSuggest(client(), termSuggest);
+            fail("cannot suggest across multiple indices with different analysis chains");
+ } catch (ReduceSearchPhaseException ex) {
+ assertThat(ex.getCause(), instanceOf(ElasticsearchIllegalStateException.class));
+ assertThat(ex.getCause().getMessage(),
+ anyOf(endsWith("Suggest entries have different sizes actual [1] expected [2]"),
+ endsWith("Suggest entries have different sizes actual [2] expected [1]")));
+ } catch (ElasticsearchIllegalStateException ex) {
+ assertThat(ex.getMessage(), anyOf(endsWith("Suggest entries have different sizes actual [1] expected [2]"),
+ endsWith("Suggest entries have different sizes actual [2] expected [1]")));
+ }
+
+
+ termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("ABCD")
+ .minWordLength(1)
+ .field("text");
+ logger.info("--> run suggestions with four indices");
+ try {
+ searchSuggest(client(), termSuggest);
+            fail("cannot suggest across multiple indices with different analysis chains");
+ } catch (ReduceSearchPhaseException ex) {
+ assertThat(ex.getCause(), instanceOf(ElasticsearchIllegalStateException.class));
+ assertThat(ex.getCause().getMessage(), anyOf(endsWith("Suggest entries have different text actual [ABCD] expected [abcd]"),
+ endsWith("Suggest entries have different text actual [abcd] expected [ABCD]")));
+ } catch (ElasticsearchIllegalStateException ex) {
+ assertThat(ex.getMessage(), anyOf(endsWith("Suggest entries have different text actual [ABCD] expected [abcd]"),
+ endsWith("Suggest entries have different text actual [abcd] expected [ABCD]")));
+ }
+
+
+ }
+
+ @Test // see #3037
+ public void testSuggestModes() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.biword.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.biword.filter", "shingler", "lowercase")
+ .put("index.analysis.filter.shingler.type", "shingle")
+ .put("index.analysis.filter.shingler.min_shingle_size", 2)
+ .put("index.analysis.filter.shingler.max_shingle_size", 3));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "multi_field")
+ .field("path", "just_name")
+ .startObject("fields")
+ .startObject("name")
+ .field("type", "string")
+ .endObject()
+ .startObject("name_shingled")
+ .field("type", "string")
+ .field("index_analyzer", "biword")
+ .field("search_analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+
+ index("test", "type1", "1", "name", "I like iced tea");
+ index("test", "type1", "2", "name", "I like tea.");
+ index("test", "type1", "3", "name", "I like ice cream.");
+ refresh();
+
+ DirectCandidateGenerator generator = candidateGenerator("name").prefixLength(0).minWordLength(0).suggestMode("always").maxEdits(2);
+ PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("did_you_mean").field("name_shingled")
+ .addCandidateGenerator(generator)
+ .gramSize(3);
+ Suggest searchSuggest = searchSuggest(client(), "ice tea", phraseSuggestion);
+ assertSuggestion(searchSuggest, 0, "did_you_mean", "iced tea");
+
+ generator.suggestMode(null);
+ searchSuggest = searchSuggest(client(), "ice tea", phraseSuggestion);
+ assertSuggestionSize(searchSuggest, 0, 0, "did_you_mean");
+ }
+
+ @Test // see #2729
+ public void testSizeOneShard() throws Exception {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 1,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+
+ for (int i = 0; i < 15; i++) {
+ index("test", "type1", Integer.toString(i), "text", "abc" + i);
+ }
+ refresh();
+
+ SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellchecker")).get();
+ assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue());
+
+ TermSuggestionBuilder termSuggestion = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("abcd")
+ .field("text")
+ .size(10);
+ Suggest suggest = searchSuggest(client(), termSuggestion);
+ assertSuggestion(suggest, 0, "test", 10, "abc0");
+
+ termSuggestion.text("abcd").shardSize(5);
+ suggest = searchSuggest(client(), termSuggestion);
+ assertSuggestion(suggest, 0, "test", 5, "abc0");
+ }
+
+ @Test
+ public void testUnmappedField() throws IOException, InterruptedException, ExecutionException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, between(1,5))
+ .put(SETTING_NUMBER_OF_REPLICAS, between(0, cluster().size() - 1))
+ .put("index.analysis.analyzer.biword.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.biword.filter", "shingler", "lowercase")
+ .put("index.analysis.filter.shingler.type", "shingle")
+ .put("index.analysis.filter.shingler.min_shingle_size", 2)
+ .put("index.analysis.filter.shingler.max_shingle_size", 3));
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "multi_field")
+ .field("path", "just_name")
+ .startObject("fields")
+ .startObject("name")
+ .field("type", "string")
+ .endObject()
+ .startObject("name_shingled")
+ .field("type", "string")
+ .field("index_analyzer", "biword")
+ .field("search_analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "type1").setSource("name", "I like iced tea"),
+ client().prepareIndex("test", "type1").setSource("name", "I like tea."),
+ client().prepareIndex("test", "type1").setSource("name", "I like ice cream."));
+ refresh();
+
+ PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("did_you_mean").field("name_shingled")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("name").prefixLength(0).minWordLength(0).suggestMode("always").maxEdits(2))
+ .gramSize(3);
+ Suggest searchSuggest = searchSuggest(client(), "ice tea", phraseSuggestion);
+ assertSuggestion(searchSuggest, 0, 0, "did_you_mean", "iced tea");
+
+ phraseSuggestion.field("nosuchField");
+ {
+ SearchRequestBuilder suggestBuilder = client().prepareSearch().setSearchType(SearchType.COUNT);
+ suggestBuilder.setSuggestText("tetsting sugestion");
+ suggestBuilder.addSuggestion(phraseSuggestion);
+ assertThrows(suggestBuilder, SearchPhaseExecutionException.class);
+ }
+ {
+ SearchRequestBuilder suggestBuilder = client().prepareSearch().setSearchType(SearchType.COUNT);
+ suggestBuilder.setSuggestText("tetsting sugestion");
+ suggestBuilder.addSuggestion(phraseSuggestion);
+ assertThrows(suggestBuilder, SearchPhaseExecutionException.class);
+ }
+ }
+
+ @Test
+ public void testSimple() throws Exception {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 1,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+
+ index("test", "type1", "1", "text", "abcd");
+ index("test", "type1", "2", "text", "aacd");
+ index("test", "type1", "3", "text", "abbd");
+ index("test", "type1", "4", "text", "abcc");
+ refresh();
+
+ SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellcecker")).get();
+ assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue());
+
+ TermSuggestionBuilder termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("abcd")
+ .field("text");
+ Suggest suggest = searchSuggest(client(), termSuggest);
+ assertSuggestion(suggest, 0, "test", "aacd", "abbd", "abcc");
+ assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd"));
+
+ suggest = searchSuggest(client(), termSuggest);
+ assertSuggestion(suggest, 0, "test", "aacd","abbd", "abcc");
+ assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd"));
+ }
+
+ @Test
+ public void testEmpty() throws Exception {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 5,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+
+ index("test", "type1", "1", "foo", "bar");
+ refresh();
+
+ TermSuggestionBuilder termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("abcd")
+ .field("text");
+ Suggest suggest = searchSuggest(client(), termSuggest);
+ assertSuggestionSize(suggest, 0, 0, "test");
+ assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd"));
+
+ suggest = searchSuggest(client(), termSuggest);
+ assertSuggestionSize(suggest, 0, 0, "test");
+ assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd"));
+ }
+
+ @Test
+ public void testWithMultipleCommands() throws Exception {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 5,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+
+ index("test", "typ1", "1", "field1", "prefix_abcd", "field2", "prefix_efgh");
+ index("test", "typ1", "2", "field1", "prefix_aacd", "field2", "prefix_eeeh");
+ index("test", "typ1", "3", "field1", "prefix_abbd", "field2", "prefix_efff");
+ index("test", "typ1", "4", "field1", "prefix_abcc", "field2", "prefix_eggg");
+ refresh();
+
+ Suggest suggest = searchSuggest(client(),
+ termSuggestion("size1")
+ .size(1).text("prefix_abcd").maxTermFreq(10).prefixLength(1).minDocFreq(0)
+ .field("field1").suggestMode("always"),
+ termSuggestion("field2")
+ .field("field2").text("prefix_eeeh prefix_efgh")
+ .maxTermFreq(10).minDocFreq(0).suggestMode("always"),
+ termSuggestion("accuracy")
+ .field("field2").text("prefix_efgh").setAccuracy(1f)
+ .maxTermFreq(10).minDocFreq(0).suggestMode("always"));
+ assertSuggestion(suggest, 0, "size1", "prefix_aacd");
+ assertThat(suggest.getSuggestion("field2").getEntries().get(0).getText().string(), equalTo("prefix_eeeh"));
+ assertSuggestion(suggest, 0, "field2", "prefix_efgh");
+ assertThat(suggest.getSuggestion("field2").getEntries().get(1).getText().string(), equalTo("prefix_efgh"));
+ assertSuggestion(suggest, 1, "field2", "prefix_eeeh", "prefix_efff", "prefix_eggg");
+ assertSuggestionSize(suggest, 0, 0, "accuracy");
+ }
+
+ @Test
+ public void testSizeAndSort() throws Exception {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 5,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+
+ Map<String, Integer> termsAndDocCount = new HashMap<String, Integer>();
+ termsAndDocCount.put("prefix_aaad", 20);
+ termsAndDocCount.put("prefix_abbb", 18);
+ termsAndDocCount.put("prefix_aaca", 16);
+ termsAndDocCount.put("prefix_abba", 14);
+ termsAndDocCount.put("prefix_accc", 12);
+ termsAndDocCount.put("prefix_addd", 10);
+ termsAndDocCount.put("prefix_abaa", 8);
+ termsAndDocCount.put("prefix_dbca", 6);
+ termsAndDocCount.put("prefix_cbad", 4);
+ termsAndDocCount.put("prefix_aacd", 1);
+ termsAndDocCount.put("prefix_abcc", 1);
+ termsAndDocCount.put("prefix_accd", 1);
+
+ for (Map.Entry<String, Integer> entry : termsAndDocCount.entrySet()) {
+ for (int i = 0; i < entry.getValue(); i++) {
+ index("test", "type1", entry.getKey() + i, "field1", entry.getKey());
+ }
+ }
+ refresh();
+
+ Suggest suggest = searchSuggest(client(), "prefix_abcd",
+ termSuggestion("size3SortScoreFirst")
+ .size(3).minDocFreq(0).field("field1").suggestMode("always"),
+ termSuggestion("size10SortScoreFirst")
+ .size(10).minDocFreq(0).field("field1").suggestMode("always").shardSize(50),
+ termSuggestion("size3SortScoreFirstMaxEdits1")
+ .maxEdits(1)
+ .size(10).minDocFreq(0).field("field1").suggestMode("always"),
+ termSuggestion("size10SortFrequencyFirst")
+ .size(10).sort("frequency").shardSize(1000)
+ .minDocFreq(0).field("field1").suggestMode("always"));
+
+        // The commented out assertions fail sometimes because suggestions are based on shard frequencies instead of index frequencies.
+ assertSuggestion(suggest, 0, "size3SortScoreFirst", "prefix_aacd", "prefix_abcc", "prefix_accd");
+ assertSuggestion(suggest, 0, "size10SortScoreFirst", 10, "prefix_aacd", "prefix_abcc", "prefix_accd" /*, "prefix_aaad" */);
+ assertSuggestion(suggest, 0, "size3SortScoreFirstMaxEdits1", "prefix_aacd", "prefix_abcc", "prefix_accd");
+ assertSuggestion(suggest, 0, "size10SortFrequencyFirst", "prefix_aaad", "prefix_abbb", "prefix_aaca", "prefix_abba",
+ "prefix_accc", "prefix_addd", "prefix_abaa", "prefix_dbca", "prefix_cbad", "prefix_aacd");
+
+ // assertThat(suggest.get(3).getSuggestedWords().get("prefix_abcd").get(4).getTerm(), equalTo("prefix_abcc"));
+ // assertThat(suggest.get(3).getSuggestedWords().get("prefix_abcd").get(4).getTerm(), equalTo("prefix_accd"));
+ }
+
+ @Test // see #2817
+ public void testStopwordsOnlyPhraseSuggest() throws ElasticsearchException, IOException {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 1,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+
+ index("test", "typ1", "1", "body", "this is a test");
+ refresh();
+
+ Suggest searchSuggest = searchSuggest(client(), "a an the",
+ phraseSuggestion("simple_phrase").field("body").gramSize(1)
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").minWordLength(1).suggestMode("always"))
+ .size(1));
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+ }
+
+ @Test
+    public void testPrefixLength() throws ElasticsearchException, IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.reverse.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.reverse.filter", "lowercase", "reverse")
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase")
+ .put("index.analysis.analyzer.bigram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.bigram.filter", "my_shingle", "lowercase")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", false)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2));
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_all").field("store", "yes").field("termVector", "with_positions_offsets").endObject()
+ .startObject("properties")
+ .startObject("body").field("type", "string").field("analyzer", "body").endObject()
+ .startObject("body_reverse").field("type", "string").field("analyzer", "reverse").endObject()
+ .startObject("bigram").field("type", "string").field("analyzer", "bigram").endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ index("test", "type1", "1", "body", "hello world");
+ index("test", "type1", "2", "body", "hello world");
+ index("test", "type1", "3", "body", "hello words");
+ refresh();
+
+ Suggest searchSuggest = searchSuggest(client(), "hello word",
+ phraseSuggestion("simple_phrase").field("body")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").prefixLength(4).minWordLength(1).suggestMode("always"))
+ .size(1).confidence(1.0f));
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "hello words");
+
+ searchSuggest = searchSuggest(client(), "hello word",
+ phraseSuggestion("simple_phrase").field("body")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").prefixLength(2).minWordLength(1).suggestMode("always"))
+ .size(1).confidence(1.0f));
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "hello world");
+ }
+
+
+ @Test
+ @Slow
+ public void testMarvelHerosPhraseSuggest() throws ElasticsearchException, IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.analysis.analyzer.reverse.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.reverse.filter", "lowercase", "reverse")
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase")
+ .put("index.analysis.analyzer.bigram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.bigram.filter", "my_shingle", "lowercase")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", false)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2));
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_all")
+ .field("store", "yes")
+ .field("termVector", "with_positions_offsets")
+ .endObject()
+ .startObject("properties")
+                .startObject("body")
+                    .field("type", "string")
+                    .field("analyzer", "body")
+                .endObject()
+                .startObject("body_reverse")
+                    .field("type", "string")
+                    .field("analyzer", "reverse")
+                .endObject()
+                .startObject("bigram")
+                    .field("type", "string")
+                    .field("analyzer", "bigram")
+                .endObject()
+ .endObject()
+ .endObject().endObject();
+ ElasticsearchAssertions.assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ for (String line: Resources.readLines(SuggestSearchTests.class.getResource("/config/names.txt"), Charsets.UTF_8)) {
+ index("test", "type1", line, "body", line, "body_reverse", line, "bigram", line);
+ }
+ refresh();
+
+ PhraseSuggestionBuilder phraseSuggest = phraseSuggestion("simple_phrase")
+ .field("bigram").gramSize(2).analyzer("body")
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always"))
+ .size(1);
+ Suggest searchSuggest = searchSuggest(client(), "american ame", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "american ace");
+ assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getText().string(), equalTo("american ame"));
+
+ phraseSuggest.realWordErrorLikelihood(0.95f);
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+ // Check the "text" field this one time.
+ assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getText().string(), equalTo("Xor the Got-Jewel"));
+
+ // Ask for highlighting
+ phraseSuggest.highlight("<em>", "</em>");
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+ assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getOptions().get(0).getHighlighted().string(), equalTo("<em>xorr</em> the <em>god</em> jewel"));
+
+ // pass in a correct phrase
+ phraseSuggest.highlight(null, null).confidence(0f).size(1).maxErrors(0.5f);
+ searchSuggest = searchSuggest(client(), "Xorr the God-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ // pass in a correct phrase - set confidence to 2
+ phraseSuggest.confidence(2f);
+ searchSuggest = searchSuggest(client(), "Xorr the God-Jewel", phraseSuggest);
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ // pass in a correct phrase - set confidence to 0.99
+ phraseSuggest.confidence(0.99f);
+ searchSuggest = searchSuggest(client(), "Xorr the God-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ //test reverse suggestions with pre & post filter
+ phraseSuggest
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always"))
+ .addCandidateGenerator(candidateGenerator("body_reverse").minWordLength(1).suggestMode("always").preFilter("reverse").postFilter("reverse"));
+ searchSuggest = searchSuggest(client(), "xor the yod-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ // set all mass to trigrams (not indexed)
+ phraseSuggest.clearCandidateGenerators()
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always"))
+ .smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(1,0,0));
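+        // (the three LinearInterpolation arguments are the trigram, bigram and unigram
+        //  lambdas; (1,0,0) puts all mass on trigrams, which were never indexed because
+        //  my_shingle is limited to max_shingle_size 2)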
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ // set all mass to bigrams
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(0,1,0));
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ // distribute mass
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(0.4,0.4,0.2));
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ searchSuggest = searchSuggest(client(), "american ame", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "american ace");
+
+ // try all smoothing methods
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(0.4,0.4,0.2));
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.Laplace(0.2));
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.StupidBackoff(0.1));
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ // check tokenLimit
+ phraseSuggest.smoothingModel(null).tokenLimit(4);
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ phraseSuggest.tokenLimit(15).smoothingModel(new PhraseSuggestionBuilder.StupidBackoff(0.1));
+ searchSuggest = searchSuggest(client(), "Xor the Got-Jewel Xor the Got-Jewel Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel xorr the god jewel xorr the god jewel");
+ // Check the name this time because we're repeating it which is funky
+ assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getText().string(), equalTo("Xor the Got-Jewel Xor the Got-Jewel Xor the Got-Jewel"));
+ }
+
+ @Test
+    public void testSizeParam() throws IOException {
+        CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+                .put(SETTING_NUMBER_OF_SHARDS, 1)
+                .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.reverse.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.reverse.filter", "lowercase", "reverse")
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase")
+ .put("index.analysis.analyzer.bigram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.bigram.filter", "my_shingle", "lowercase")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", false)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("_all")
+ .field("store", "yes")
+ .field("termVector", "with_positions_offsets")
+ .endObject()
+ .startObject("properties")
+ .startObject("body")
+ .field("type", "string")
+ .field("analyzer", "body")
+ .endObject()
+ .startObject("body_reverse")
+ .field("type", "string")
+ .field("analyzer", "reverse")
+ .endObject()
+ .startObject("bigram")
+ .field("type", "string")
+ .field("analyzer", "bigram")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ String line = "xorr the god jewel";
+ index("test", "type1", "1", "body", line, "body_reverse", line, "bigram", line);
+ line = "I got it this time";
+ index("test", "type1", "2", "body", line, "body_reverse", line, "bigram", line);
+ refresh();
+
+ PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("simple_phrase")
+ .realWordErrorLikelihood(0.95f)
+ .field("bigram")
+ .gramSize(2)
+ .analyzer("body")
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).prefixLength(1).suggestMode("always").size(1).accuracy(0.1f))
+ .smoothingModel(new PhraseSuggestionBuilder.StupidBackoff(0.1))
+ .maxErrors(1.0f)
+ .size(5);
+ Suggest searchSuggest = searchSuggest(client(), "Xorr the Gut-Jewel", phraseSuggestion);
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ // we allow a size of 2 now on the shard generator level so "god" will be found since it's LD2
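+        // ("Gut" -> "god" is two substitutions, i.e. Levenshtein distance 2, which is
+        //  why it only surfaces once the generator may return more than one candidate)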
+ phraseSuggestion.clearCandidateGenerators()
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).prefixLength(1).suggestMode("always").size(2).accuracy(0.1f));
+ searchSuggest = searchSuggest(client(), "Xorr the Gut-Jewel", phraseSuggestion);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+ }
+
+ @Test
+ public void testPhraseBoundaryCases() throws ElasticsearchException, IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase")
+ .put("index.analysis.analyzer.bigram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.bigram.filter", "my_shingle", "lowercase")
+ .put("index.analysis.analyzer.ngram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.ngram.filter", "my_shingle2", "lowercase")
+ .put("index.analysis.analyzer.myDefAnalyzer.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.myDefAnalyzer.filter", "shingle", "lowercase")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", false)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle2.type", "shingle")
+ .put("index.analysis.filter.my_shingle2.output_unigrams", true)
+ .put("index.analysis.filter.my_shingle2.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle2.max_shingle_size", 2));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder()
+ .startObject().startObject("type1")
+ .startObject("_all").field("store", "yes").field("termVector", "with_positions_offsets").endObject()
+ .startObject("properties")
+ .startObject("body").field("type", "string").field("analyzer", "body").endObject()
+ .startObject("bigram").field("type", "string").field("analyzer", "bigram").endObject()
+ .startObject("ngram").field("type", "string").field("analyzer", "ngram").endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ for (String line: Resources.readLines(SuggestSearchTests.class.getResource("/config/names.txt"), Charsets.UTF_8)) {
+ index("test", "type1", line, "body", line, "bigram", line, "ngram", line);
+ }
+ refresh();
+
+        // Let's make sure some things throw exceptions
+ PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("simple_phrase")
+ .field("bigram")
+ .analyzer("body")
+ .addCandidateGenerator(candidateGenerator("does_not_exist").minWordLength(1).suggestMode("always"))
+ .realWordErrorLikelihood(0.95f)
+ .maxErrors(0.5f)
+ .size(1);
+ try {
+ searchSuggest(client(), "Xor the Got-Jewel", 5, phraseSuggestion);
+            fail("field does not exist");
+ } catch (SearchPhaseExecutionException e) {}
+
+ phraseSuggestion.clearCandidateGenerators().analyzer(null);
+ try {
+ searchSuggest(client(), "Xor the Got-Jewel", 5, phraseSuggestion);
+            fail("analyzer only produces ngrams");
+ } catch (SearchPhaseExecutionException e) {
+ }
+
+ phraseSuggestion.analyzer("bigram");
+ try {
+ searchSuggest(client(), "Xor the Got-Jewel", 5, phraseSuggestion);
+            fail("analyzer only produces ngrams");
+ } catch (SearchPhaseExecutionException e) {
+ }
+
+ // Now we'll make sure some things don't
+ phraseSuggestion.forceUnigrams(false);
+ searchSuggest(client(), "Xor the Got-Jewel", phraseSuggestion);
+
+ // Field doesn't produce unigrams but the analyzer does
+ phraseSuggestion.forceUnigrams(true).field("bigram").analyzer("ngram");
+ searchSuggest(client(), "Xor the Got-Jewel",
+ phraseSuggestion);
+
+ phraseSuggestion.field("ngram").analyzer("myDefAnalyzer")
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always"));
+ Suggest suggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggestion);
+ assertSuggestion(suggest, 0, "simple_phrase", "xorr the god jewel");
+
+ phraseSuggestion.analyzer(null);
+ suggest = searchSuggest(client(), "Xor the Got-Jewel", phraseSuggestion);
+ assertSuggestion(suggest, 0, "simple_phrase", "xorr the god jewel");
+ }
+
+ @Test
+ public void testDifferentShardSize() throws Exception {
+ prepareCreate("text").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 5)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
+ ensureGreen();
+ indexRandom(true, client().prepareIndex("text", "type1", "1").setSource("field1", "foobar1").setRouting("1"),
+ client().prepareIndex("text", "type1", "2").setSource("field1", "foobar2").setRouting("2"),
+ client().prepareIndex("text", "type1", "3").setSource("field1", "foobar3").setRouting("3"));
+
+ Suggest suggest = searchSuggest(client(), "foobar",
+ termSuggestion("simple")
+ .size(10).minDocFreq(0).field("field1").suggestMode("always"));
+ ElasticsearchAssertions.assertSuggestionSize(suggest, 0, 3, "simple");
+ }
+
+ @Test // see #3469
+ public void testShardFailures() throws IOException, InterruptedException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, between(1, 5))
+ .put(SETTING_NUMBER_OF_REPLICAS, between(0, cluster().size() - 1))
+ .put("index.analysis.analyzer.suggest.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.suggest.filter", "standard", "lowercase", "shingler")
+ .put("index.analysis.filter.shingler.type", "shingle")
+ .put("index.analysis.filter.shingler.min_shingle_size", 2)
+ .put("index.analysis.filter.shingler.max_shingle_size", 5)
+ .put("index.analysis.filter.shingler.output_unigrams", true));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "multi_field")
+ .field("path", "just_name")
+ .startObject("fields")
+ .startObject("name")
+ .field("type", "string")
+ .field("analyzer", "suggest")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ index("test", "type2", "1", "foo", "bar");
+ index("test", "type2", "2", "foo", "bar");
+ index("test", "type2", "3", "foo", "bar");
+ index("test", "type2", "4", "foo", "bar");
+ index("test", "type2", "5", "foo", "bar");
+ index("test", "type2", "1", "name", "Just testing the suggestions api");
+ index("test", "type2", "2", "name", "An other title about equal length");
+        // Note that the last document has to have about the same length as the others or cutoff rechecking will remove the useful suggestion.
+ refresh();
+
+ // When searching on a shard with a non existing mapping, we should fail
+ SearchRequestBuilder request = client().prepareSearch().setSearchType(SearchType.COUNT)
+ .setSuggestText("tetsting sugestion")
+ .addSuggestion(phraseSuggestion("did_you_mean").field("fielddoesnotexist").maxErrors(5.0f));
+ assertThrows(request, SearchPhaseExecutionException.class);
+
+ // When searching on a shard which does not hold yet any document of an existing type, we should not fail
+ SearchResponse searchResponse = client().prepareSearch().setSearchType(SearchType.COUNT)
+ .setSuggestText("tetsting sugestion")
+ .addSuggestion(phraseSuggestion("did_you_mean").field("name").maxErrors(5.0f))
+ .get();
+ ElasticsearchAssertions.assertNoFailures(searchResponse);
+ ElasticsearchAssertions.assertSuggestion(searchResponse.getSuggest(), 0, 0, "did_you_mean", "testing suggestions");
+ }
+
+ @Test // see #3469
+ public void testEmptyShards() throws IOException, InterruptedException {
+        XContentBuilder mappingBuilder = XContentFactory.jsonBuilder()
+                .startObject()
+                    .startObject("type1")
+                        .startObject("properties")
+                            .startObject("name")
+                                .field("type", "multi_field")
+                                .field("path", "just_name")
+                                .startObject("fields")
+                                    .startObject("name")
+                                        .field("type", "string")
+                                        .field("analyzer", "suggest")
+                                    .endObject()
+                                .endObject()
+                            .endObject()
+                        .endObject()
+                    .endObject()
+                .endObject();
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 5)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.suggest.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.suggest.filter", "standard", "lowercase", "shingler")
+ .put("index.analysis.filter.shingler.type", "shingle")
+ .put("index.analysis.filter.shingler.min_shingle_size", 2)
+ .put("index.analysis.filter.shingler.max_shingle_size", 5)
+ .put("index.analysis.filter.shingler.output_unigrams", true)).addMapping("type1", mappingBuilder));
+ ensureGreen();
+
+ index("test", "type2", "1", "foo", "bar");
+ index("test", "type2", "2", "foo", "bar");
+ index("test", "type1", "1", "name", "Just testing the suggestions api");
+ index("test", "type1", "2", "name", "An other title about equal length");
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.COUNT)
+ .setSuggestText("tetsting sugestion")
+ .addSuggestion(phraseSuggestion("did_you_mean").field("name").maxErrors(5.0f))
+ .get();
+
+ assertNoFailures(searchResponse);
+ assertSuggestion(searchResponse.getSuggest(), 0, 0, "did_you_mean", "testing suggestions");
+ }
+
+ /**
+ * Searching for a rare phrase shouldn't provide any suggestions if confidence > 1. This was possible before we rechecked the cutoff
+     * score during the reduce phase. Failures don't occur every time - maybe two out of five tries - but we don't repeat the test to save time.
+ */
+ @Test
+ public void testSearchForRarePhrase() throws ElasticsearchException, IOException {
+        // If there isn't enough chaff per shard then shards can become unbalanced, making the cutoff recheck this test exercises do more harm than good.
+        int chaffPerShard = 100;
+ int numberOfShards = between(2, 5);
+
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, numberOfShards)
+ .put(SETTING_NUMBER_OF_REPLICAS, between(0, cluster().size() - 1))
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase", "my_shingle")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", true)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("_all")
+ .field("store", "yes")
+ .field("termVector", "with_positions_offsets")
+ .endObject()
+ .startObject("properties")
+ .startObject("body")
+ .field("type", "string")
+ .field("analyzer", "body")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ List<String> phrases = new ArrayList<String>();
+ Collections.addAll(phrases, "nobel prize", "noble gases", "somethingelse prize", "pride and joy", "notes are fun");
+ for (int i = 0; i < 8; i++) {
+ phrases.add("noble somethingelse" + i);
+ }
+        for (int i = 0; i < numberOfShards * chaffPerShard; i++) {
+ phrases.add("chaff" + i);
+ }
+ for (String phrase: phrases) {
+ index("test", "type1", phrase, "body", phrase);
+ }
+ refresh();
+
+ Suggest searchSuggest = searchSuggest(client(), "nobel prize", phraseSuggestion("simple_phrase")
+ .field("body")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").minWordLength(1).suggestMode("always").maxTermFreq(.99f))
+ .confidence(2f)
+ .maxErrors(5f)
+ .size(1));
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ searchSuggest = searchSuggest(client(), "noble prize", phraseSuggestion("simple_phrase")
+ .field("body")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").minWordLength(1).suggestMode("always").maxTermFreq(.99f))
+ .confidence(2f)
+ .maxErrors(5f)
+ .size(1));
+ assertSuggestion(searchSuggest, 0, 0, "simple_phrase", "nobel prize");
+ }
+
+ protected Suggest searchSuggest(Client client, SuggestionBuilder<?>... suggestion) {
+        return searchSuggest(client, null, suggestion);
+ }
+
+ protected Suggest searchSuggest(Client client, String suggestText, SuggestionBuilder<?>... suggestions) {
+        return searchSuggest(client, suggestText, 0, suggestions);
+ }
+
+ protected Suggest searchSuggest(Client client, String suggestText, int expectShardsFailed, SuggestionBuilder<?>... suggestions) {
+        SearchRequestBuilder builder = client.prepareSearch().setSearchType(SearchType.COUNT);
+ if (suggestText != null) {
+ builder.setSuggestText(suggestText);
+ }
+ for (SuggestionBuilder<?> suggestion : suggestions) {
+ builder.addSuggestion(suggestion);
+ }
+ SearchResponse actionGet = builder.execute().actionGet();
+ assertThat(Arrays.toString(actionGet.getShardFailures()), actionGet.getFailedShards(), equalTo(expectShardsFailed));
+ return actionGet.getSuggest();
+ }
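+
+    // Typical usage of these helpers, as invoked throughout this class (illustrative,
+    // mirroring calls above):
+    //
+    //   Suggest suggest = searchSuggest(client(), "some text",
+    //           termSuggestion("name").field("body").suggestMode("always"));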
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java b/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java
new file mode 100644
index 0000000..4a47b3d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java
@@ -0,0 +1,337 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.suggest.completion;
+
+import com.carrotsearch.hppc.ObjectLongOpenHashMap;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.codecs.*;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.search.suggest.Lookup;
+import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;
+import org.apache.lucene.search.suggest.analyzing.XFuzzySuggester;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.fst.ByteSequenceOutputs;
+import org.apache.lucene.util.fst.FST;
+import org.apache.lucene.util.fst.PairOutputs;
+import org.apache.lucene.util.fst.PairOutputs.Pair;
+import org.apache.lucene.util.fst.PositiveIntOutputs;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat.CompletionLookupProvider;
+import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat.LookupFactory;
+import org.elasticsearch.search.suggest.completion.AnalyzingCompletionLookupProvider.AnalyzingSuggestHolder;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * This is an older implementation of the AnalyzingCompletionLookupProvider class.
+ * We use this to test for backwards compatibility in our tests, namely
+ * CompletionPostingsFormatTest. This ensures upgrades between versions work smoothly.
+ */
+public class AnalyzingCompletionLookupProviderV1 extends CompletionLookupProvider {
+
+ // for serialization
+    public static final int SERIALIZE_PRESERVE_SEPARATORS = 1;
+ public static final int SERIALIZE_HAS_PAYLOADS = 2;
+ public static final int SERIALIZE_PRESERVE_POSITION_INCREMENTS = 4;
+
+ private static final int MAX_SURFACE_FORMS_PER_ANALYZED_FORM = 256;
+ private static final int MAX_GRAPH_EXPANSIONS = -1;
+
+ public static final String CODEC_NAME = "analyzing";
+ public static final int CODEC_VERSION = 1;
+
+ private boolean preserveSep;
+ private boolean preservePositionIncrements;
+ private int maxSurfaceFormsPerAnalyzedForm;
+ private int maxGraphExpansions;
+ private boolean hasPayloads;
+ private final XAnalyzingSuggester prototype;
+
+    // Important: these are the settings from the old XAnalyzingSuggester
+ public static final int SEP_LABEL = 0xFF;
+ public static final int END_BYTE = 0x0;
+ public static final int PAYLOAD_SEP = '\u001f';
+
+ public AnalyzingCompletionLookupProviderV1(boolean preserveSep, boolean exactFirst, boolean preservePositionIncrements, boolean hasPayloads) {
+ this.preserveSep = preserveSep;
+ this.preservePositionIncrements = preservePositionIncrements;
+ this.hasPayloads = hasPayloads;
+ this.maxSurfaceFormsPerAnalyzedForm = MAX_SURFACE_FORMS_PER_ANALYZED_FORM;
+ this.maxGraphExpansions = MAX_GRAPH_EXPANSIONS;
+ int options = preserveSep ? XAnalyzingSuggester.PRESERVE_SEP : 0;
+        // needs to be fixed in the suggester first before it can be supported
+ //options |= exactFirst ? XAnalyzingSuggester.EXACT_FIRST : 0;
+ prototype = new XAnalyzingSuggester(null, null, options, maxSurfaceFormsPerAnalyzedForm, maxGraphExpansions, preservePositionIncrements,
+ null, false, 1, SEP_LABEL, PAYLOAD_SEP, END_BYTE, XAnalyzingSuggester.HOLE_CHARACTER);
+ }
+
+ @Override
+ public String getName() {
+ return "analyzing";
+ }
+
+ @Override
+ public FieldsConsumer consumer(final IndexOutput output) throws IOException {
+ CodecUtil.writeHeader(output, CODEC_NAME, CODEC_VERSION);
+ return new FieldsConsumer() {
+ private Map<FieldInfo, Long> fieldOffsets = new HashMap<FieldInfo, Long>();
+
+ @Override
+ public void close() throws IOException {
+                try {
+                    /*
+                     * write the offsets per field such that we know where
+                     * we need to load the FSTs from
+                     */
+ long pointer = output.getFilePointer();
+ output.writeVInt(fieldOffsets.size());
+ for (Map.Entry<FieldInfo, Long> entry : fieldOffsets.entrySet()) {
+ output.writeString(entry.getKey().name);
+ output.writeVLong(entry.getValue());
+ }
+ output.writeLong(pointer);
+ output.flush();
+ } finally {
+ IOUtils.close(output);
+ }
+ }
+
+ @Override
+ public TermsConsumer addField(final FieldInfo field) throws IOException {
+
+ return new TermsConsumer() {
+ final XAnalyzingSuggester.XBuilder builder = new XAnalyzingSuggester.XBuilder(maxSurfaceFormsPerAnalyzedForm, hasPayloads, PAYLOAD_SEP);
+ final CompletionPostingsConsumer postingsConsumer = new CompletionPostingsConsumer(AnalyzingCompletionLookupProviderV1.this, builder);
+
+ @Override
+ public PostingsConsumer startTerm(BytesRef text) throws IOException {
+ builder.startTerm(text);
+ return postingsConsumer;
+ }
+
+ @Override
+ public Comparator<BytesRef> getComparator() throws IOException {
+ return BytesRef.getUTF8SortedAsUnicodeComparator();
+ }
+
+ @Override
+ public void finishTerm(BytesRef text, TermStats stats) throws IOException {
+ builder.finishTerm(stats.docFreq); // use doc freq as a fallback
+ }
+
+ @Override
+ public void finish(long sumTotalTermFreq, long sumDocFreq, int docCount) throws IOException {
+ /*
+ * Here we are done processing the field and we can
+                         * build the FST and write it to disk.
+ */
+ FST<Pair<Long, BytesRef>> build = builder.build();
+ assert build != null || docCount == 0 : "the FST is null but docCount is != 0 actual value: [" + docCount + "]";
+ /*
+ * it's possible that the FST is null if we have 2 segments that get merged
+ * and all docs that have a value in this field are deleted. This will cause
+ * a consumer to be created but it doesn't consume any values causing the FSTBuilder
+ * to return null.
+ */
+ if (build != null) {
+ fieldOffsets.put(field, output.getFilePointer());
+ build.save(output);
+ /* write some more meta-info */
+ output.writeVInt(postingsConsumer.getMaxAnalyzedPathsForOneInput());
+ output.writeVInt(maxSurfaceFormsPerAnalyzedForm);
+ output.writeInt(maxGraphExpansions); // can be negative
+ int options = 0;
+                            options |= preserveSep ? SERIALIZE_PRESERVE_SEPARATORS : 0;
+ options |= hasPayloads ? SERIALIZE_HAS_PAYLOADS : 0;
+ options |= preservePositionIncrements ? SERIALIZE_PRESERVE_POSITION_INCREMENTS : 0;
+ output.writeVInt(options);
+ }
+ }
+ };
+ }
+ };
+ }
+
+ private static final class CompletionPostingsConsumer extends PostingsConsumer {
+ private final SuggestPayload spare = new SuggestPayload();
+ private AnalyzingCompletionLookupProviderV1 analyzingSuggestLookupProvider;
+ private XAnalyzingSuggester.XBuilder builder;
+ private int maxAnalyzedPathsForOneInput = 0;
+
+ public CompletionPostingsConsumer(AnalyzingCompletionLookupProviderV1 analyzingSuggestLookupProvider, XAnalyzingSuggester.XBuilder builder) {
+ this.analyzingSuggestLookupProvider = analyzingSuggestLookupProvider;
+ this.builder = builder;
+ }
+
+ @Override
+ public void startDoc(int docID, int freq) throws IOException {
+ }
+
+ @Override
+ public void addPosition(int position, BytesRef payload, int startOffset, int endOffset) throws IOException {
+ analyzingSuggestLookupProvider.parsePayload(payload, spare);
+ builder.addSurface(spare.surfaceForm, spare.payload, spare.weight);
+ // a single input can produce several analyzed paths; record the highest
+ // position so we know the maximum number of analyzed paths per input
+ maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, position + 1);
+ }
+
+ @Override
+ public void finishDoc() throws IOException {
+ }
+
+ public int getMaxAnalyzedPathsForOneInput() {
+ return maxAnalyzedPathsForOneInput;
+ }
+ }
+
+ @Override
+ public LookupFactory load(IndexInput input) throws IOException {
+ CodecUtil.checkHeader(input, CODEC_NAME, CODEC_VERSION, CODEC_VERSION);
+ final Map<String, AnalyzingSuggestHolder> lookupMap = new HashMap<String, AnalyzingSuggestHolder>();
+ input.seek(input.length() - 8);
+ long metaPointer = input.readLong();
+ input.seek(metaPointer);
+ int numFields = input.readVInt();
+
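+ // key the map by offset (TreeMap) so the FSTs below are loaded in
+ // on-disk order and the input is only ever seeked forward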
+ Map<Long, String> meta = new TreeMap<Long, String>();
+ for (int i = 0; i < numFields; i++) {
+ String name = input.readString();
+ long offset = input.readVLong();
+ meta.put(offset, name);
+ }
+ long sizeInBytes = 0;
+ for (Map.Entry<Long, String> entry : meta.entrySet()) {
+ input.seek(entry.getKey());
+ FST<Pair<Long, BytesRef>> fst = new FST<Pair<Long, BytesRef>>(input, new PairOutputs<Long, BytesRef>(
+ PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()));
+ int maxAnalyzedPathsForOneInput = input.readVInt();
+ int maxSurfaceFormsPerAnalyzedForm = input.readVInt();
+ int maxGraphExpansions = input.readInt();
+ int options = input.readVInt();
+ boolean preserveSep = (options & SERIALIZE_PRESERVE_SEPERATORS) != 0;
+ boolean hasPayloads = (options & SERIALIZE_HAS_PAYLOADS) != 0;
+ boolean preservePositionIncrements = (options & SERIALIZE_PRESERVE_POSITION_INCREMENTS) != 0;
+ sizeInBytes += fst.sizeInBytes();
+ lookupMap.put(entry.getValue(), new AnalyzingSuggestHolder(preserveSep, preservePositionIncrements, maxSurfaceFormsPerAnalyzedForm, maxGraphExpansions,
+ hasPayloads, maxAnalyzedPathsForOneInput, fst));
+ }
+ final long ramBytesUsed = sizeInBytes;
+ return new LookupFactory() {
+ @Override
+ public Lookup getLookup(FieldMapper<?> mapper, CompletionSuggestionContext suggestionContext) {
+ AnalyzingSuggestHolder analyzingSuggestHolder = lookupMap.get(mapper.names().indexName());
+ if (analyzingSuggestHolder == null) {
+ return null;
+ }
+ int flags = analyzingSuggestHolder.preserveSep ? XAnalyzingSuggester.PRESERVE_SEP : 0;
+
+ XAnalyzingSuggester suggester;
+ if (suggestionContext.isFuzzy()) {
+ suggester = new XFuzzySuggester(mapper.indexAnalyzer(), mapper.searchAnalyzer(), flags,
+ analyzingSuggestHolder.maxSurfaceFormsPerAnalyzedForm, analyzingSuggestHolder.maxGraphExpansions,
+ suggestionContext.getFuzzyEditDistance(), suggestionContext.isFuzzyTranspositions(),
+ suggestionContext.getFuzzyPrefixLength(), suggestionContext.getFuzzyMinLength(), false,
+ analyzingSuggestHolder.fst, analyzingSuggestHolder.hasPayloads,
+ analyzingSuggestHolder.maxAnalyzedPathsForOneInput, SEP_LABEL, PAYLOAD_SEP, END_BYTE, XAnalyzingSuggester.HOLE_CHARACTER);
+
+ } else {
+ suggester = new XAnalyzingSuggester(mapper.indexAnalyzer(), mapper.searchAnalyzer(), flags,
+ analyzingSuggestHolder.maxSurfaceFormsPerAnalyzedForm, analyzingSuggestHolder.maxGraphExpansions,
+ analyzingSuggestHolder.preservePositionIncrements,
+ analyzingSuggestHolder.fst, analyzingSuggestHolder.hasPayloads,
+ analyzingSuggestHolder.maxAnalyzedPathsForOneInput, SEP_LABEL, PAYLOAD_SEP, END_BYTE, XAnalyzingSuggester.HOLE_CHARACTER);
+ }
+ return suggester;
+ }
+
+ @Override
+ public CompletionStats stats(String... fields) {
+ long sizeInBytes = 0;
+ ObjectLongOpenHashMap<String> completionFields = null;
+ if (fields != null && fields.length > 0) {
+ completionFields = new ObjectLongOpenHashMap<String>(fields.length);
+ }
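+ // completionFields stays null when no field patterns are given; the
+ // CompletionStats returned below then carries only the total size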
+
+ for (Map.Entry<String, AnalyzingSuggestHolder> entry : lookupMap.entrySet()) {
+ sizeInBytes += entry.getValue().fst.sizeInBytes();
+ if (fields == null || fields.length == 0) {
+ continue;
+ }
+ for (String field : fields) {
+ // support for getting fields by regex as in fielddata
+ if (Regex.simpleMatch(field, entry.getKey())) {
+ long fstSize = entry.getValue().fst.sizeInBytes();
+ completionFields.addTo(field, fstSize);
+ }
+ }
+ }
+
+ return new CompletionStats(sizeInBytes, completionFields);
+ }
+
+ @Override
+ AnalyzingSuggestHolder getAnalyzingSuggestHolder(FieldMapper<?> mapper) {
+ return lookupMap.get(mapper.names().indexName());
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ return ramBytesUsed;
+ }
+ };
+ }
+
+ /*
+ // might be re-added when the current impl changes; not needed right now
+ static class AnalyzingSuggestHolder {
+ final boolean preserveSep;
+ final boolean preservePositionIncrements;
+ final int maxSurfaceFormsPerAnalyzedForm;
+ final int maxGraphExpansions;
+ final boolean hasPayloads;
+ final int maxAnalyzedPathsForOneInput;
+ final FST<Pair<Long, BytesRef>> fst;
+
+ public AnalyzingSuggestHolder(boolean preserveSep, boolean preservePositionIncrements, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions,
+ boolean hasPayloads, int maxAnalyzedPathsForOneInput, FST<Pair<Long, BytesRef>> fst) {
+ this.preserveSep = preserveSep;
+ this.preservePositionIncrements = preservePositionIncrements;
+ this.maxSurfaceFormsPerAnalyzedForm = maxSurfaceFormsPerAnalyzedForm;
+ this.maxGraphExpansions = maxGraphExpansions;
+ this.hasPayloads = hasPayloads;
+ this.maxAnalyzedPathsForOneInput = maxAnalyzedPathsForOneInput;
+ this.fst = fst;
+ }
+
+ }
+ */
+
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ return prototype.toFiniteStrings(prototype.getTokenStreamToAutomaton(), stream);
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java b/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java
new file mode 100644
index 0000000..650ac2a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java
@@ -0,0 +1,329 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.suggest.completion;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.codecs.*;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.*;
+import org.apache.lucene.index.FieldInfo.DocValuesType;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.search.suggest.InputIterator;
+import org.apache.lucene.search.suggest.Lookup;
+import org.apache.lucene.search.suggest.Lookup.LookupResult;
+import org.apache.lucene.search.suggest.analyzing.AnalyzingSuggester;
+import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LineFileDocs;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.codec.postingsformat.Elasticsearch090PostingsFormat;
+import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;
+import org.elasticsearch.index.codec.postingsformat.PreBuiltPostingsFormatProvider;
+import org.elasticsearch.index.mapper.FieldMapper.Names;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
+import org.elasticsearch.index.merge.Merges;
+import org.elasticsearch.search.suggest.SuggestUtils;
+import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat.LookupFactory;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+public class CompletionPostingsFormatTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testCompletionPostingsFormat() throws IOException {
+ AnalyzingCompletionLookupProviderV1 providerV1 = new AnalyzingCompletionLookupProviderV1(true, false, true, true);
+ AnalyzingCompletionLookupProvider currentProvider = new AnalyzingCompletionLookupProvider(true, false, true, true);
+ List<Completion090PostingsFormat.CompletionLookupProvider> providers = Lists.newArrayList(providerV1, currentProvider);
+
+ Completion090PostingsFormat.CompletionLookupProvider randomProvider = providers.get(getRandom().nextInt(providers.size()));
+ RAMDirectory dir = new RAMDirectory();
+ writeData(dir, randomProvider);
+
+ IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
+ LookupFactory load = currentProvider.load(input);
+ PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
+ NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
+ Lookup lookup = load.getLookup(new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format, null, true, true, true, Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null), new CompletionSuggestionContext(null));
+ List<LookupResult> result = lookup.lookup("ge", false, 10);
+ assertThat(result.get(0).key.toString(), equalTo("Generator - Foo Fighters"));
+ assertThat(result.get(0).payload.utf8ToString(), equalTo("id:10"));
+ dir.close();
+ }
+
+ @Test
+ public void testProviderBackwardCompatibilityForVersion1() throws IOException {
+ AnalyzingCompletionLookupProviderV1 providerV1 = new AnalyzingCompletionLookupProviderV1(true, false, true, true);
+ AnalyzingCompletionLookupProvider currentProvider = new AnalyzingCompletionLookupProvider(true, false, true, true);
+
+ RAMDirectory dir = new RAMDirectory();
+ writeData(dir, providerV1);
+
+ IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
+ LookupFactory load = currentProvider.load(input);
+ PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
+ NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
+ AnalyzingCompletionLookupProvider.AnalyzingSuggestHolder analyzingSuggestHolder = load.getAnalyzingSuggestHolder(new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format, null, true, true, true, Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null));
+ assertThat(analyzingSuggestHolder.sepLabel, is(AnalyzingCompletionLookupProviderV1.SEP_LABEL));
+ assertThat(analyzingSuggestHolder.payloadSep, is(AnalyzingCompletionLookupProviderV1.PAYLOAD_SEP));
+ assertThat(analyzingSuggestHolder.endByte, is(AnalyzingCompletionLookupProviderV1.END_BYTE));
+ dir.close();
+ }
+
+ @Test
+ public void testProviderVersion2() throws IOException {
+ AnalyzingCompletionLookupProvider currentProvider = new AnalyzingCompletionLookupProvider(true, false, true, true);
+
+ RAMDirectory dir = new RAMDirectory();
+ writeData(dir, currentProvider);
+
+ IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
+ LookupFactory load = currentProvider.load(input);
+ PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
+ NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
+ AnalyzingCompletionLookupProvider.AnalyzingSuggestHolder analyzingSuggestHolder = load.getAnalyzingSuggestHolder(new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format, null, true, true, true, Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null));
+ assertThat(analyzingSuggestHolder.sepLabel, is(XAnalyzingSuggester.SEP_LABEL));
+ assertThat(analyzingSuggestHolder.payloadSep, is(XAnalyzingSuggester.PAYLOAD_SEP));
+ assertThat(analyzingSuggestHolder.endByte, is(XAnalyzingSuggester.END_BYTE));
+ dir.close();
+ }
+
+ @Test
+ public void testDuellCompletions() throws IOException, NoSuchFieldException, SecurityException, IllegalArgumentException,
+ IllegalAccessException {
+ final boolean preserveSeparators = getRandom().nextBoolean();
+ final boolean preservePositionIncrements = getRandom().nextBoolean();
+ final boolean usePayloads = getRandom().nextBoolean();
+ final int options = preserveSeparators ? AnalyzingSuggester.PRESERVE_SEP : 0;
+
+ XAnalyzingSuggester reference = new XAnalyzingSuggester(new StandardAnalyzer(TEST_VERSION_CURRENT), new StandardAnalyzer(
+ TEST_VERSION_CURRENT), options, 256, -1, preservePositionIncrements, null, false, 1, XAnalyzingSuggester.SEP_LABEL, XAnalyzingSuggester.PAYLOAD_SEP, XAnalyzingSuggester.END_BYTE, XAnalyzingSuggester.HOLE_CHARACTER);
+ LineFileDocs docs = new LineFileDocs(getRandom());
+ int num = atLeast(150);
+ final String[] titles = new String[num];
+ final long[] weights = new long[num];
+ for (int i = 0; i < titles.length; i++) {
+ Document nextDoc = docs.nextDoc();
+ IndexableField field = nextDoc.getField("title");
+ titles[i] = field.stringValue();
+ weights[i] = between(0, 100);
+
+ }
+ docs.close();
+ final InputIterator primaryIter = new InputIterator() {
+ int index = 0;
+ long currentWeight = -1;
+
+ @Override
+ public Comparator<BytesRef> getComparator() {
+ return null;
+ }
+
+ @Override
+ public BytesRef next() throws IOException {
+ if (index < titles.length) {
+ currentWeight = weights[index];
+ return new BytesRef(titles[index++]);
+ }
+ return null;
+ }
+
+ @Override
+ public long weight() {
+ return currentWeight;
+ }
+
+ @Override
+ public BytesRef payload() {
+ return null;
+ }
+
+ @Override
+ public boolean hasPayloads() {
+ return false;
+ }
+
+ };
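+ // as used here, the InputIterator contract is: next() returns null once
+ // exhausted, and weight()/payload() refer to the entry last returned by next()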
+ InputIterator iter;
+ if (usePayloads) {
+ iter = new InputIterator() {
+ @Override
+ public long weight() {
+ return primaryIter.weight();
+ }
+
+ @Override
+ public Comparator<BytesRef> getComparator() {
+ return primaryIter.getComparator();
+ }
+
+ @Override
+ public BytesRef next() throws IOException {
+ return primaryIter.next();
+ }
+
+ @Override
+ public BytesRef payload() {
+ return new BytesRef(Long.toString(weight()));
+ }
+
+ @Override
+ public boolean hasPayloads() {
+ return true;
+ }
+ };
+ } else {
+ iter = primaryIter;
+ }
+ reference.build(iter);
+ PostingsFormatProvider provider = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
+
+ NamedAnalyzer namedAnalyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
+ final CompletionFieldMapper mapper = new CompletionFieldMapper(new Names("foo"), namedAnalyzer, namedAnalyzer, provider, null, usePayloads,
+ preserveSeparators, preservePositionIncrements, Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null);
+ Lookup buildAnalyzingLookup = buildAnalyzingLookup(mapper, titles, titles, weights);
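+ // compare, via reflection, the private maxAnalyzedPathsForOneInput counters
+ // of the reference suggester and the lookup rebuilt from the index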
+ Field field = buildAnalyzingLookup.getClass().getDeclaredField("maxAnalyzedPathsForOneInput");
+ field.setAccessible(true);
+ Field refField = reference.getClass().getDeclaredField("maxAnalyzedPathsForOneInput");
+ refField.setAccessible(true);
+ assertThat(refField.get(reference), equalTo(field.get(buildAnalyzingLookup)));
+
+ for (int i = 0; i < titles.length; i++) {
+ int res = between(1, 10);
+ final StringBuilder builder = new StringBuilder();
+ SuggestUtils.analyze(namedAnalyzer.tokenStream("foo", titles[i]), new SuggestUtils.TokenConsumer() {
+ @Override
+ public void nextToken() throws IOException {
+ if (builder.length() == 0) {
+ builder.append(this.charTermAttr.toString());
+ }
+ }
+ });
+ String firstTerm = builder.toString();
+ String prefix = firstTerm.isEmpty() ? "" : firstTerm.substring(0, between(1, firstTerm.length()));
+ List<LookupResult> refLookup = reference.lookup(prefix, false, res);
+ List<LookupResult> lookup = buildAnalyzingLookup.lookup(prefix, false, res);
+ assertThat(refLookup.toString(), lookup.size(), equalTo(refLookup.size()));
+ for (int j = 0; j < refLookup.size(); j++) {
+ assertThat(lookup.get(j).key, equalTo(refLookup.get(j).key));
+ assertThat("prefix: " + prefix + " " + j + " -- mismatch cost: " + lookup.get(j).key + " - " + lookup.get(j).value + " | " + refLookup.get(j).key + " - " + refLookup.get(j).value,
+ lookup.get(j).value, equalTo(refLookup.get(j).value));
+ assertThat(lookup.get(j).payload, equalTo(refLookup.get(j).payload));
+ if (usePayloads) {
+ assertThat(lookup.get(j).payload.utf8ToString(), equalTo(Long.toString(lookup.get(j).value)));
+ }
+ }
+ }
+ }
+
+ public Lookup buildAnalyzingLookup(final CompletionFieldMapper mapper, String[] terms, String[] surfaces, long[] weights)
+ throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ FilterCodec filterCodec = new FilterCodec("filtered", Codec.getDefault()) {
+ public PostingsFormat postingsFormat() {
+ return mapper.postingsFormatProvider().get();
+ }
+ };
+ IndexWriterConfig indexWriterConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, mapper.indexAnalyzer());
+
+ indexWriterConfig.setCodec(filterCodec);
+ IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+ for (int i = 0; i < weights.length; i++) {
+ Document doc = new Document();
+ BytesRef payload = mapper.buildPayload(new BytesRef(surfaces[i]), weights[i], new BytesRef(Long.toString(weights[i])));
+ doc.add(mapper.getCompletionField(terms[i], payload));
+ if (randomBoolean()) {
+ writer.commit();
+ }
+ writer.addDocument(doc);
+ }
+ writer.commit();
+ Merges.forceMerge(writer, 1);
+ writer.commit();
+ DirectoryReader reader = DirectoryReader.open(writer, true);
+ assertThat(reader.leaves().size(), equalTo(1));
+ assertThat(reader.leaves().get(0).reader().numDocs(), equalTo(weights.length));
+ AtomicReaderContext atomicReaderContext = reader.leaves().get(0);
+ Terms luceneTerms = atomicReaderContext.reader().terms(mapper.name());
+ Lookup lookup = ((Completion090PostingsFormat.CompletionTerms) luceneTerms).getLookup(mapper, new CompletionSuggestionContext(null));
+ reader.close();
+ writer.close();
+ dir.close();
+ return lookup;
+ }
+
+ @Test
+ public void testNoDocs() throws IOException {
+ AnalyzingCompletionLookupProvider provider = new AnalyzingCompletionLookupProvider(true, false, true, true);
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("foo.txt", IOContext.DEFAULT);
+ FieldsConsumer consumer = provider.consumer(output);
+ FieldInfo fieldInfo = new FieldInfo("foo", true, 1, false, true, true, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS,
+ DocValuesType.SORTED, DocValuesType.BINARY, new HashMap<String, String>());
+ TermsConsumer addField = consumer.addField(fieldInfo);
+ addField.finish(0, 0, 0);
+ consumer.close();
+ output.close();
+
+ IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
+ LookupFactory load = provider.load(input);
+ PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
+ NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
+ assertNull(load.getLookup(new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format, null, true, true, true, Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null), new CompletionSuggestionContext(null)));
+ dir.close();
+ }
+
+ // TODO add more unit tests
+ private void writeData(Directory dir, Completion090PostingsFormat.CompletionLookupProvider provider) throws IOException {
+ IndexOutput output = dir.createOutput("foo.txt", IOContext.DEFAULT);
+ FieldsConsumer consumer = provider.consumer(output);
+ FieldInfo fieldInfo = new FieldInfo("foo", true, 1, false, true, true, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS,
+ DocValuesType.SORTED, DocValuesType.BINARY, new HashMap<String, String>());
+ TermsConsumer addField = consumer.addField(fieldInfo);
+
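+ // the position argument doubles as the analyzed-path index here; the
+ // provider records max(position) + 1 as maxAnalyzedPathsForOneInput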
+ PostingsConsumer postingsConsumer = addField.startTerm(new BytesRef("foofightersgenerator"));
+ postingsConsumer.startDoc(0, 1);
+ postingsConsumer.addPosition(256 - 2, provider.buildPayload(new BytesRef("Generator - Foo Fighters"), 9, new BytesRef("id:10")), 0,
+ 1);
+ postingsConsumer.finishDoc();
+ addField.finishTerm(new BytesRef("foofightersgenerator"), new TermStats(1, 1));
+ addField.startTerm(new BytesRef("generator"));
+ postingsConsumer.startDoc(0, 1);
+ postingsConsumer.addPosition(256 - 1, provider.buildPayload(new BytesRef("Generator - Foo Fighters"), 9, new BytesRef("id:10")), 0,
+ 1);
+ postingsConsumer.finishDoc();
+ addField.finishTerm(new BytesRef("generator"), new TermStats(1, 1));
+ addField.finish(1, 1, 1);
+ consumer.close();
+ output.close();
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java b/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java
new file mode 100644
index 0000000..83fb721
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java
@@ -0,0 +1,403 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import com.google.common.base.Charsets;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.LowerCaseFilter;
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
+import org.apache.lucene.analysis.reverse.ReverseStringFilter;
+import org.apache.lucene.analysis.shingle.ShingleFilter;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.synonym.SolrSynonymParser;
+import org.apache.lucene.analysis.synonym.SynonymFilter;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.search.spell.DirectSpellChecker;
+import org.apache.lucene.search.spell.SuggestMode;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.search.suggest.phrase.NoisyChannelSpellChecker.Result;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.*;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+public class NoisyChannelSpellCheckerTests extends ElasticsearchTestCase {
+ private final BytesRef space = new BytesRef(" ");
+ private final BytesRef preTag = new BytesRef("<em>");
+ private final BytesRef postTag = new BytesRef("</em>");
+
+ @Test
+ public void testMarvelHeros() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
+ mapping.put("body_ngram", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ ShingleFilter tf = new ShingleFilter(t, 2, 3);
+ tf.setOutputUnigrams(false);
+ return new TokenStreamComponents(t, new LowerCaseFilter(Version.LUCENE_41, tf));
+ }
+
+ });
+
+ mapping.put("body", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ return new TokenStreamComponents(t, new LowerCaseFilter(Version.LUCENE_41, t));
+ }
+
+ });
+ PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_41), mapping);
+
+ IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_41, wrapper);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(NoisyChannelSpellCheckerTests.class.getResourceAsStream("/config/names.txt"), Charsets.UTF_8));
+ String line = null;
+ while ((line = reader.readLine()) != null) {
+ Document doc = new Document();
+ doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
+ doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
+ writer.addDocument(doc);
+ }
+
+ DirectoryReader ir = DirectoryReader.open(writer, false);
+ WordScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f);
+
+ NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
+ DirectSpellChecker spellchecker = new DirectSpellChecker();
+ spellchecker.setMinQueryLength(1);
+ DirectCandidateGenerator generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, 5);
+ Result result = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 2);
+ Correction[] corrections = result.corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("american ace"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("american <em>ace</em>"));
+ assertThat(result.cutoffScore, greaterThan(0d));
+
+ result = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 0, 1);
+ corrections = result.corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("american ame"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("american ame"));
+ assertThat(result.cutoffScore, equalTo(Double.MIN_VALUE));
+
+ suggester = new NoisyChannelSpellChecker(0.85);
+ wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 2).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(space).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(space).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(space).utf8ToString(), equalTo("xorr the got jewel"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorr</em> the <em>god</em> jewel"));
+ assertThat(corrections[1].join(space, preTag, postTag).utf8ToString(), equalTo("xor the <em>god</em> jewel"));
+ assertThat(corrections[2].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorn</em> the <em>god</em> jewel"));
+ assertThat(corrections[3].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorr</em> the got jewel"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(space).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(space).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(space).utf8ToString(), equalTo("xorr the got jewel"));
+
+ // Test some of the highlighting corner cases
+ suggester = new NoisyChannelSpellChecker(0.85);
+ wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor teh Got-Jewel"), generator, 4f, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(space).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(space).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(space).utf8ToString(), equalTo("xor teh god jewel"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorr the god</em> jewel"));
+ assertThat(corrections[1].join(space, preTag, postTag).utf8ToString(), equalTo("xor <em>the god</em> jewel"));
+ assertThat(corrections[2].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorn the god</em> jewel"));
+ assertThat(corrections[3].join(space, preTag, postTag).utf8ToString(), equalTo("xor teh <em>god</em> jewel"));
+
+ // test synonyms
+
+ Analyzer analyzer = new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ TokenFilter filter = new LowerCaseFilter(Version.LUCENE_41, t);
+ try {
+ SolrSynonymParser parser = new SolrSynonymParser(true, false, new WhitespaceAnalyzer(Version.LUCENE_41));
+ parser.parse(new StringReader("usa => usa, america, american\nursa => usa, america, american"));
+ filter = new SynonymFilter(filter, parser.build(), true);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return new TokenStreamComponents(t, filter);
+ }
+ };
+
+ spellchecker.setAccuracy(0.0f);
+ spellchecker.setMinPrefix(1);
+ spellchecker.setMinQueryLength(1);
+ suggester = new NoisyChannelSpellChecker(0.85);
+ wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f);
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("captain america"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>captain america</em>"));
+
+ generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.85, 10, null, analyzer, MultiFields.getTerms(ir, "body"));
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>captain america</em>"));
+
+ // Make sure that user supplied text is not marked as highlighted in the presence of a synonym filter
+ generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.85, 10, null, analyzer, MultiFields.getTerms(ir, "body"));
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captain usw"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("captain <em>america</em>"));
+ }
+
+ @Test
+ public void testMarvelHerosMultiGenerator() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
+ mapping.put("body_ngram", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ ShingleFilter tf = new ShingleFilter(t, 2, 3);
+ tf.setOutputUnigrams(false);
+ return new TokenStreamComponents(t, new LowerCaseFilter(Version.LUCENE_41, tf));
+ }
+
+ });
+
+ mapping.put("body", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ return new TokenStreamComponents(t, new LowerCaseFilter(Version.LUCENE_41, t));
+ }
+
+ });
+ mapping.put("body_reverse", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ return new TokenStreamComponents(t, new ReverseStringFilter(Version.LUCENE_41, new LowerCaseFilter(Version.LUCENE_41, t)));
+ }
+
+ });
+ PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_41), mapping);
+
+ IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_41, wrapper);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(NoisyChannelSpellCheckerTests.class.getResourceAsStream("/config/names.txt"), Charsets.UTF_8));
+ String line = null;
+ while ((line = reader.readLine()) != null) {
+ Document doc = new Document();
+ doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
+ doc.add(new Field("body_reverse", line, TextField.TYPE_NOT_STORED));
+ doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
+ writer.addDocument(doc);
+ }
+
+ DirectoryReader ir = DirectoryReader.open(writer, false);
+ LaplaceScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f);
+ NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
+ DirectSpellChecker spellchecker = new DirectSpellChecker();
+ spellchecker.setMinQueryLength(1);
+ DirectCandidateGenerator forward = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_ALWAYS, ir, 0.95, 10);
+ DirectCandidateGenerator reverse = new DirectCandidateGenerator(spellchecker, "body_reverse", SuggestMode.SUGGEST_ALWAYS, ir, 0.95, 10, wrapper, wrapper, MultiFields.getTerms(ir, "body_reverse"));
+ CandidateGenerator generator = new MultiCandidateGeneratorWrapper(10, forward, reverse);
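+ // the reverse generator spell-checks against body_reverse (indexed through
+ // ReverseStringFilter), so errors at the start of a word can be corrected too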
+
+ Correction[] corrections = suggester.getCorrections(wrapper, new BytesRef("american cae"), generator, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
+
+ generator = new MultiCandidateGeneratorWrapper(5, forward, reverse);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("american cae"), forward, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(0)); // forward-only: DirectSpellChecker needs a matching prefix, so the leading-edit "cae" -> "ace" cannot be found
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("america cae"), generator, 2, 1, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Zorr the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 2).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("zorr the god jewel"));
+ assertThat(corrections[2].join(new BytesRef(" ")).utf8ToString(), equalTo("gorr the god jewel"));
+ assertThat(corrections[3].join(new BytesRef(" ")).utf8ToString(), equalTo("varr the god jewel"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Zorr the Got-Jewel"), generator, 0.5f, 1, ir, "body", wordScorer, 1.5f, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 1, ir, "body", wordScorer, 1.5f, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+
+ }
+
+ @Test
+ public void testMarvelHerosTrigram() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
+ mapping.put("body_ngram", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ ShingleFilter tf = new ShingleFilter(t, 2, 3);
+ tf.setOutputUnigrams(false);
+ return new TokenStreamComponents(t, new LowerCaseFilter(Version.LUCENE_41, tf));
+ }
+
+ });
+
+ mapping.put("body", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ return new TokenStreamComponents(t, new LowerCaseFilter(Version.LUCENE_41, t));
+ }
+
+ });
+ PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_41), mapping);
+
+ IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_41, wrapper);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(NoisyChannelSpellCheckerTests.class.getResourceAsStream("/config/names.txt"), Charsets.UTF_8));
+ String line = null;
+ while ((line = reader.readLine()) != null) {
+ Document doc = new Document();
+ doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
+ doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
+ writer.addDocument(doc);
+ }
+
+ DirectoryReader ir = DirectoryReader.open(writer, false);
+ WordScorer wordScorer = new LinearInterpoatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5, 0.4, 0.1);
+
+ NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
+ DirectSpellChecker spellchecker = new DirectSpellChecker();
+ spellchecker.setMinQueryLength(1);
+ DirectCandidateGenerator generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, 5);
+ Correction[] corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 3).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 1).corrections;
+ assertThat(corrections.length, equalTo(0));
+// assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ape"));
+
+ wordScorer = new LinearInterpoatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5, 0.4, 0.1);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 3).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(new BytesRef(" ")).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the got jewel"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 1, 3).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(new BytesRef(" ")).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the got jewel"));
+
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 1, ir, "body", wordScorer, 100, 3).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+
+
+ // test synonyms
+
+ Analyzer analyzer = new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer t = new StandardTokenizer(Version.LUCENE_41, reader);
+ TokenFilter filter = new LowerCaseFilter(Version.LUCENE_41, t);
+ try {
+ SolrSynonymParser parser = new SolrSynonymParser(true, false, new WhitespaceAnalyzer(Version.LUCENE_41));
+ parser.parse(new StringReader("usa => usa, america, american\nursa => usa, america, american"));
+ filter = new SynonymFilter(filter, parser.build(), true);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return new TokenStreamComponents(t, filter);
+ }
+ };
+
+ spellchecker.setAccuracy(0.0f);
+ spellchecker.setMinPrefix(1);
+ spellchecker.setMinQueryLength(1);
+ suggester = new NoisyChannelSpellChecker(0.95);
+ wordScorer = new LinearInterpoatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5, 0.4, 0.1);
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4, ir, "body", wordScorer, 1, 3).corrections;
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
+
+ generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, 10, null, analyzer, MultiFields.getTerms(ir, "body"));
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4, ir, "body", wordScorer, 1, 3).corrections;
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
+
+
+ wordScorer = new StupidBackoffScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.4);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 2, ir, "body", wordScorer, 0, 3).corrections;
+ assertThat(corrections.length, equalTo(2));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("xor the god jewel"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java b/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java
new file mode 100644
index 0000000..5305d99
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.timeout;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.FilterBuilders.scriptFilter;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class SearchTimeoutTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return ImmutableSettings.builder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ .build();
+ }
+
+ @Test
+ public void simpleTimeoutTest() throws Exception {
+ createIndex("test");
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
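+ // the script filter sleeps 100ms per document while the request timeout is
+ // 10ms, so the search is expected to time out and report a partial result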
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setTimeout("10ms")
+ .setQuery(filteredQuery(matchAllQuery(), scriptFilter("Thread.sleep(100); return true;")))
+ .execute().actionGet();
+ assertThat(searchResponse.isTimedOut(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/similarity/SimilarityTests.java b/src/test/java/org/elasticsearch/similarity/SimilarityTests.java
new file mode 100644
index 0000000..6f72604
--- /dev/null
+++ b/src/test/java/org/elasticsearch/similarity/SimilarityTests.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.similarity;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+
+public class SimilarityTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void testCustomBM25Similarity() throws Exception {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("field1")
+ .field("similarity", "custom")
+ .field("type", "string")
+ .endObject()
+ .startObject("field2")
+ .field("similarity", "default")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject())
+ .setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("similarity.custom.type", "BM25")
+ .put("similarity.custom.k1", 2.0f)
+ .put("similarity.custom.b", 1.5f)
+ ).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumped over the lazy dog",
+ "field2", "the quick brown fox jumped over the lazy dog")
+ .setRefresh(true).execute().actionGet();
+
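+ // field1 (custom BM25, k1=2.0, b=1.5) and field2 (default similarity) hold
+ // identical text, so the same query should score the two fields differently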
+ SearchResponse bm25SearchResponse = client().prepareSearch().setQuery(matchQuery("field1", "quick brown fox")).execute().actionGet();
+ assertThat(bm25SearchResponse.getHits().totalHits(), equalTo(1l));
+ float bm25Score = bm25SearchResponse.getHits().hits()[0].score();
+
+ SearchResponse defaultSearchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown fox")).execute().actionGet();
+ assertThat(defaultSearchResponse.getHits().totalHits(), equalTo(1l));
+ float defaultScore = defaultSearchResponse.getHits().hits()[0].score();
+
+ assertThat(bm25Score, not(equalTo(defaultScore)));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotTests.java b/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotTests.java
new file mode 100644
index 0000000..27620c9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.cluster.metadata.SnapshotMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.snapshots.mockstore.MockRepository;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Ignore;
+
+import java.io.File;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@Ignore
+public abstract class AbstractSnapshotTests extends ElasticsearchIntegrationTest {
+
+ public static long getFailureCount(String repository) {
+ long failureCount = 0;
+ for (RepositoriesService repositoriesService : cluster().getInstances(RepositoriesService.class)) {
+ MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository);
+ failureCount += mockRepository.getFailureCount();
+ }
+ return failureCount;
+ }
+
+ public static int numberOfFiles(File dir) {
+ int count = 0;
+ File[] files = dir.listFiles();
+ if (files != null) {
+ for (File file : files) {
+ if (file.isDirectory()) {
+ count += numberOfFiles(file);
+ } else {
+ count++;
+ }
+ }
+ }
+ return count;
+ }
+
+ public static void stopNode(final String node) {
+ cluster().stopRandomNode(new Predicate<Settings>() {
+ @Override
+ public boolean apply(Settings settings) {
+ return settings.get("name").equals(node);
+ }
+ });
+ }
+
+ public void waitForBlock(String node, String repository, TimeValue timeout) throws InterruptedException {
+ long start = System.currentTimeMillis();
+ RepositoriesService repositoriesService = cluster().getInstance(RepositoriesService.class, node);
+ MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository);
+ while (System.currentTimeMillis() - start < timeout.millis()) {
+ if (mockRepository.blocked()) {
+ return;
+ }
+ Thread.sleep(100);
+ }
+ fail("Timeout!!!");
+ }
+
+ public SnapshotInfo waitForCompletion(String repository, String snapshot, TimeValue timeout) throws InterruptedException {
+ long start = System.currentTimeMillis();
+ SnapshotId snapshotId = new SnapshotId(repository, snapshot);
+ while (System.currentTimeMillis() - start < timeout.millis()) {
+ ImmutableList<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots(repository).setSnapshots(snapshot).get().getSnapshots();
+ assertThat(snapshotInfos.size(), equalTo(1));
+ if (snapshotInfos.get(0).state().completed()) {
+ // Make sure that snapshot clean up operations are finished
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
+ SnapshotMetaData snapshotMetaData = stateResponse.getState().getMetaData().custom(SnapshotMetaData.TYPE);
+ if (snapshotMetaData == null || snapshotMetaData.snapshot(snapshotId) == null) {
+ return snapshotInfos.get(0);
+ }
+ }
+ Thread.sleep(100);
+ }
+ fail("Timeout!!!");
+ return null;
+ }
+
+ public static String blockNodeWithIndex(String index) {
+ for (String node : cluster().nodesInclude(index)) {
+ ((MockRepository) cluster().getInstance(RepositoriesService.class, node).repository("test-repo")).blockOnDataFiles(true);
+ return node;
+ }
+ fail("No nodes for the index " + index + " found");
+ return null;
+ }
+
+ public static void unblockNode(String node) {
+ ((MockRepository)cluster().getInstance(RepositoriesService.class, node).repository("test-repo")).unblock();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java
new file mode 100644
index 0000000..0913f8f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.snapshots.mockstore.MockRepositoryModule;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.store.MockDirectoryHelper;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.Test;
+
+import java.util.ArrayList;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Snapshot/restore tests that require a dedicated cluster: each test starts its own nodes.
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numNodes = 0)
+public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
+
+ @Test
+ public void restorePersistentSettingsTest() throws Exception {
+ logger.info("--> start node");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local"));
+ Client client = client();
+
+ // Add dummy persistent setting
+ logger.info("--> set test persistent setting");
+ String settingValue = "test-" + randomInt();
+ client.admin().cluster().prepareUpdateSettings().setPersistentSettings(ImmutableSettings.settingsBuilder().put(ThreadPool.THREADPOOL_GROUP + "dummy.value", settingValue)).execute().actionGet();
+ assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
+ .getMetaData().persistentSettings().get(ThreadPool.THREADPOOL_GROUP + "dummy.value"), equalTo(settingValue));
+
+ logger.info("--> create repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())).execute().actionGet();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> start snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> clean the test persistent setting");
+ client.admin().cluster().prepareUpdateSettings().setPersistentSettings(ImmutableSettings.settingsBuilder().put(ThreadPool.THREADPOOL_GROUP + "dummy.value", "")).execute().actionGet();
+ assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
+ .getMetaData().persistentSettings().get(ThreadPool.THREADPOOL_GROUP + "dummy.value"), equalTo(""));
+
+ logger.info("--> restore snapshot");
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).execute().actionGet();
+ assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
+ .getMetaData().persistentSettings().get(ThreadPool.THREADPOOL_GROUP + "dummy.value"), equalTo(settingValue));
+ }
+
+ @Test
+ public void snapshotDuringNodeShutdownTest() throws Exception {
+ logger.info("--> start 2 nodes");
+ ArrayList<String> nodes = newArrayList();
+ nodes.add(cluster().startNode());
+ nodes.add(cluster().startNode());
+ Client client = client();
+
+ assertAcked(prepareCreate("test-idx", 2, settingsBuilder().put("number_of_shards", 2).put("number_of_replicas", 0).put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false)));
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> create repository");
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.TEST))
+ .put("random", randomAsciiOfLength(10))
+ .put("wait_after_unblock", 200)
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ // Pick one node and block it
+ String blockedNode = blockNodeWithIndex("test-idx");
+
+ logger.info("--> snapshot");
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+
+ logger.info("--> waiting for block to kick in");
+ waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
+
+ logger.info("--> execution was blocked on node [{}], shutting it down", blockedNode);
+ unblockNode(blockedNode);
+
+ logger.info("--> stopping node", blockedNode);
+ stopNode(blockedNode);
+ logger.info("--> waiting for completion");
+ SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(60));
+ logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
+ logger.info("--> done");
+ }
+
+ @Test
+ @TestLogging("snapshots:TRACE")
+ public void restoreIndexWithMissingShards() throws Exception {
+ logger.info("--> start 2 nodes");
+ cluster().startNode(settingsBuilder().put("gateway.type", "local"));
+ cluster().startNode(settingsBuilder().put("gateway.type", "local"));
+ cluster().wipeIndices("_all");
+
+ assertAcked(prepareCreate("test-idx-1", 2, settingsBuilder().put("number_of_shards", 6)
+ .put("number_of_replicas", 0)
+ .put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false)));
+ ensureGreen();
+
+ logger.info("--> indexing some data into test-idx-1");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client().prepareCount("test-idx-1").get().getCount(), equalTo(100L));
+
+ logger.info("--> shutdown one of the nodes");
+ cluster().stopRandomNode();
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForNodes("<2").execute().actionGet().isTimedOut(), equalTo(false));
+
+ assertAcked(prepareCreate("test-idx-2", 1, settingsBuilder().put("number_of_shards", 6)
+ .put("number_of_replicas", 0)
+ .put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false)));
+ ensureGreen("test-idx-2");
+
+ logger.info("--> indexing some data into test-idx-2");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx-2", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client().prepareCount("test-idx-2").get().getCount(), equalTo(100L));
+
+ logger.info("--> create repository");
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())).execute().actionGet();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> start snapshot with default settings - should fail");
+ CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).execute().actionGet();
+
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.FAILED));
+
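+ // With partial set to true the snapshot is allowed to proceed, skipping the shards that
+ // currently have no primary allocated instead of failing the whole snapshot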
+ createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2").setWaitForCompletion(true).setPartial(true).execute().actionGet();
+ logger.info("State: [{}], Reason: [{}]", createSnapshotResponse.getSnapshotInfo().state(), createSnapshotResponse.getSnapshotInfo().reason());
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(12));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), lessThan(12));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(6));
+ assertThat(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-2").execute().actionGet().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ assertAcked(client().admin().indices().prepareClose("test-idx-1", "test-idx-2").execute().actionGet());
+
+ logger.info("--> restore incomplete snapshot - should fail");
+ assertThrows(client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2").setRestoreGlobalState(false).setWaitForCompletion(true).execute(), SnapshotRestoreException.class);
+
+ logger.info("--> restore snapshot for the index that was snapshotted completely");
+ RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2").setRestoreGlobalState(false).setIndices("test-idx-2").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue());
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(6));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
+
+ ensureGreen("test-idx-2");
+
+ assertThat(client().prepareCount("test-idx-2").get().getCount(), equalTo(100L));
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java b/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java
new file mode 100644
index 0000000..7471f14
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
+import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.repositories.RepositoryException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Tests for registering, listing and deleting snapshot repositories.
+ */
+public class RepositoriesTests extends AbstractSnapshotTests {
+
+ @Test
+ public void testRepositoryCreation() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo-1")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> check that repository is really there");
+ ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().clear().setMetaData(true).get();
+ MetaData metaData = clusterStateResponse.getState().getMetaData();
+ RepositoriesMetaData repositoriesMetaData = metaData.custom(RepositoriesMetaData.TYPE);
+ assertThat(repositoriesMetaData, notNullValue());
+ assertThat(repositoriesMetaData.repository("test-repo-1"), notNullValue());
+ assertThat(repositoriesMetaData.repository("test-repo-1").type(), equalTo("fs"));
+
+ logger.info("--> creating anoter repository");
+ putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo-2")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> check that both repositories are in cluster state");
+ clusterStateResponse = client.admin().cluster().prepareState().clear().setMetaData(true).get();
+ metaData = clusterStateResponse.getState().getMetaData();
+ repositoriesMetaData = metaData.custom(RepositoriesMetaData.TYPE);
+ assertThat(repositoriesMetaData, notNullValue());
+ assertThat(repositoriesMetaData.repositories().size(), equalTo(2));
+ assertThat(repositoriesMetaData.repository("test-repo-1"), notNullValue());
+ assertThat(repositoriesMetaData.repository("test-repo-1").type(), equalTo("fs"));
+ assertThat(repositoriesMetaData.repository("test-repo-2"), notNullValue());
+ assertThat(repositoriesMetaData.repository("test-repo-2").type(), equalTo("fs"));
+
+ logger.info("--> check that both repositories can be retrieved by getRepositories query");
+ GetRepositoriesResponse repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
+ assertThat(repositoriesResponse.repositories().size(), equalTo(2));
+ assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-1"), notNullValue());
+ assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue());
+
+ logger.info("--> delete repository test-repo-1");
+ client.admin().cluster().prepareDeleteRepository("test-repo-1").get();
+ repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
+ assertThat(repositoriesResponse.repositories().size(), equalTo(1));
+ assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue());
+
+ logger.info("--> delete repository test-repo-2");
+ client.admin().cluster().prepareDeleteRepository("test-repo-2").get();
+ repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
+ assertThat(repositoriesResponse.repositories().size(), equalTo(0));
+ }
+
+ private RepositoryMetaData findRepository(ImmutableList<RepositoryMetaData> repositories, String name) {
+ for (RepositoryMetaData repository : repositories) {
+ if (repository.name().equals(name)) {
+ return repository;
+ }
+ }
+ return null;
+ }
+
+ @Test
+ public void testMisconfiguredRepository() throws Exception {
+ Client client = client();
+
+ logger.info("--> trying creating repository with incorrect settings");
+ try {
+ client.admin().cluster().preparePutRepository("test-repo").setType("fs").get();
+ fail("Shouldn't be here");
+ } catch (RepositoryException ex) {
+ // Expected
+ }
+ }
+
+ @Test
+ public void repositoryAckTimeoutTest() throws Exception {
+
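+ // A 0s timeout makes the put/delete repository request return before the cluster state update
+ // is acknowledged by the other nodes, so isAcknowledged() is expected to be false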
+ logger.info("--> creating repository test-repo-1 with 0s timeout - shouldn't ack");
+ PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo-1")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(5, 100))
+ )
+ .setTimeout("0s").get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(false));
+
+ logger.info("--> creating repository test-repo-2 with standard timeout - should ack");
+ putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo-2")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(5, 100))
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> deleting repository test-repo-2 with 0s timeout - shouldn't ack");
+ DeleteRepositoryResponse deleteRepositoryResponse = client().admin().cluster().prepareDeleteRepository("test-repo-2")
+ .setTimeout("0s").get();
+ assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(false));
+
+ logger.info("--> deleting repository test-repo-1 with standard timeout - should ack");
+ deleteRepositoryResponse = client().admin().cluster().prepareDeleteRepository("test-repo-1").get();
+ assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java
new file mode 100644
index 0000000..754fa0e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java
@@ -0,0 +1,983 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ListenableActionFuture;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.indices.InvalidIndexNameException;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.snapshots.mockstore.MockRepositoryModule;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.store.MockDirectoryHelper;
+import org.junit.Test;
+
+import java.io.File;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Snapshot/restore tests that can share a single cluster across test methods.
+ */
+public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
+
+ @Override
+ public Settings indexSettings() {
+ // During restore we frequently restore an index to exactly the same state it was in before the
+ // snapshot, which might cause the same checksum file to be written twice during the restore operation
+ return ImmutableSettings.builder().put(super.indexSettings())
+ .put(MockDirectoryHelper.RANDOM_PREVENT_DOUBLE_WRITE, false)
+ .put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false) //TODO: Ask Simon if this is hiding an issue
+ .build();
+ }
+
+ @Test
+ public void basicWorkFlowTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000))));
+
+ createIndex("test-idx-1", "test-idx-2", "test-idx-3");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
+ index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
+ index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete some data");
+ for (int i = 0; i < 50; i++) {
+ client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
+ }
+ for (int i = 50; i < 100; i++) {
+ client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
+ }
+ for (int i = 0; i < 100; i += 2) {
+ client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(50L));
+ assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(50L));
+ assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));
+
+ logger.info("--> close indices");
+ client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();
+
+ logger.info("--> restore all indices from the snapshot");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));
+
+ // Test restore after index deletion
+ logger.info("--> delete indices");
+ cluster().wipeIndices("test-idx-1", "test-idx-2");
+ logger.info("--> restore one index after deletion");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
+ ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
+ assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
+ assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
+ }
+
+ @Test
+ public void restoreWithDifferentMappingsAndSettingsTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000))));
+
+ logger.info("--> create index with foo type");
+ assertAcked(prepareCreate("test-idx", 2, ImmutableSettings.builder().put("refresh_interval", 10)));
+
+ assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("foo").setSource("baz", "type=string"));
+ ensureGreen();
+
+ logger.info("--> snapshot it");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> delete the index and recreate it with bar type");
+ cluster().wipeIndices("test-idx");
+ assertAcked(prepareCreate("test-idx", 2, ImmutableSettings.builder().put("refresh_interval", 5)));
+ assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("bar").setSource("baz", "type=string"));
+ ensureGreen();
+
+ logger.info("--> close index");
+ client.admin().indices().prepareClose("test-idx").get();
+
+ logger.info("--> restore all indices from the snapshot");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ ensureGreen();
+
+ logger.info("--> assert that old mapping is restored");
+ ImmutableOpenMap<String, MappingMetaData> mappings = client().admin().cluster().prepareState().get().getState().getMetaData().getIndices().get("test-idx").getMappings();
+ assertThat(mappings.get("foo"), notNullValue());
+ assertThat(mappings.get("bar"), nullValue());
+
+ logger.info("--> assert that old settings are restored");
+ GetSettingsResponse getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").execute().actionGet();
+ assertThat(getSettingsResponse.getSetting("test-idx", "index.refresh_interval"), equalTo("10"));
+ }
+
+ @Test
+ public void emptySnapshotTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+ }
+
+ @Test
+ public void restoreTemplatesTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())));
+
+ logger.info("--> creating test template");
+ assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", "{}").get().isAcknowledged(), equalTo(true));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setIndices().setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete test template");
+ assertThat(client.admin().indices().prepareDeleteTemplate("test-template").get().isAcknowledged(), equalTo(true));
+ ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices(Strings.EMPTY_ARRAY).get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
+
+ logger.info("--> restore cluster state");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet();
+ // We don't restore any indices here
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
+
+ logger.info("--> check that template is restored");
+ clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices(Strings.EMPTY_ARRAY).get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(true));
+ }
+
+ @Test
+ public void includeGlobalStateTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ File location = newTempDir();
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", location)));
+
+ logger.info("--> creating test template");
+ assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", "{}").get().isAcknowledged(), equalTo(true));
+
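+ // Templates are part of the global cluster state, so they are only captured in the snapshot
+ // when include_global_state is set to true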
+ logger.info("--> snapshot without global state");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state").setIndices().setIncludeGlobalState(false).setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> snapshot with global state");
+ createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-with-global-state").setIndices().setIncludeGlobalState(true).setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-with-global-state").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete test template");
+ cluster().wipeTemplates("test-template");
+ ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices().get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
+
+ logger.info("--> try restoring cluster state from snapshot without global state");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
+
+ logger.info("--> check that template wasn't restored");
+ clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices().get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
+
+ logger.info("--> restore cluster state");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-with-global-state").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
+
+ logger.info("--> check that template is restored");
+ clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices().get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(true));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot without global state but with indices");
+ createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state-with-index").setIndices("test-idx").setIncludeGlobalState(false).setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state-with-index").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete test template and index ");
+ cluster().wipeIndices("test-idx");
+ cluster().wipeTemplates("test-template");
+ clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices().get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
+
+ logger.info("--> try restoring index and cluster state from snapshot without global state");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state-with-index").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
+
+ ensureGreen();
+ logger.info("--> check that template wasn't restored but index was");
+ clusterStateResponse = client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).setIndexTemplates("test-template").setIndices().get();
+ assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+ }
+
+ @Test
+ public void snapshotFileFailureDuringSnapshotTest() throws Exception {
+ Client client = client();
+
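+ // random_control_io_exception_rate makes the mock repository fail roughly 20% of the writes
+ // to control (metadata) files with an IOException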
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.TEST))
+ .put("random", randomAsciiOfLength(10))
+ .put("random_control_io_exception_rate", 0.2)));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ try {
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) {
+ // No failures were reported, so verify that the mock repository did not record any either
+ assertThat(getFailureCount("test-repo"), equalTo(0L));
+ } else {
+ assertThat(getFailureCount("test-repo"), greaterThan(0L));
+ assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), greaterThan(0));
+ for (SnapshotShardFailure shardFailure : createSnapshotResponse.getSnapshotInfo().shardFailures()) {
+ assertThat(shardFailure.reason(), containsString("Random IOException"));
+ assertThat(shardFailure.nodeId(), notNullValue());
+ assertThat(shardFailure.index(), equalTo("test-idx"));
+ }
+ GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get();
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
+ SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
+ if (snapshotInfo.state() == SnapshotState.SUCCESS) {
+ assertThat(snapshotInfo.shardFailures().size(), greaterThan(0));
+ assertThat(snapshotInfo.totalShards(), greaterThan(snapshotInfo.successfulShards()));
+ }
+ }
+ } catch (Exception ex) {
+ assertThat(getFailureCount("test-repo"), greaterThan(0L));
+ assertThat(ExceptionsHelper.detailedMessage(ex), containsString("IOException"));
+ }
+ }
+
+ @Test
+ public void dataFileFailureDuringSnapshotTest() throws Exception {
+ Client client = client();
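+ // random_data_file_io_exception_rate makes the mock repository fail roughly 10% of the data
+ // file writes with an IOException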
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.TEST))
+ .put("random", randomAsciiOfLength(10))
+ .put("random_data_file_io_exception_rate", 0.1)));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) {
+ // No failures were reported, so verify that the mock repository did not record any either
+ assertThat(getFailureCount("test-repo"), equalTo(0L));
+ } else {
+ assertThat(getFailureCount("test-repo"), greaterThan(0L));
+ assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), greaterThan(0));
+ for (SnapshotShardFailure shardFailure : createSnapshotResponse.getSnapshotInfo().shardFailures()) {
+ assertThat(shardFailure.nodeId(), notNullValue());
+ assertThat(shardFailure.index(), equalTo("test-idx"));
+ }
+ GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get();
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
+ SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
+ assertThat(snapshotInfo.shardFailures().size(), greaterThan(0));
+ assertThat(snapshotInfo.totalShards(), greaterThan(snapshotInfo.successfulShards()));
+ }
+ }
+
+ @Test
+ public void dataFileFailureDuringRestoreTest() throws Exception {
+ File repositoryLocation = newTempDir(LifecycleScope.TEST);
+ Client client = client();
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(createSnapshotResponse.getSnapshotInfo().successfulShards()));
+
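+ // Re-registering the repository under the same name with the mock type, pointing at the same
+ // location, lets the test inject read failures into the subsequent restore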
+ logger.info("--> update repository with mock version");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("random", randomAsciiOfLength(10))
+ .put("random_data_file_io_exception_rate", 0.3)));
+
+ // Test restore after index deletion
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+ logger.info("--> restore index after deletion");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ ensureGreen();
+ CountResponse countResponse = client.prepareCount("test-idx").get();
+ assertThat(countResponse.getCount(), equalTo(100L));
+ }
+
+
+ @Test
+ @TestLogging("snapshots:TRACE")
+ public void deletionOfFailingToRecoverIndexShouldStopRestore() throws Exception {
+ File repositoryLocation = newTempDir(LifecycleScope.TEST);
+ Client client = client();
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(createSnapshotResponse.getSnapshotInfo().successfulShards()));
+
+ logger.info("--> update repository with mock version");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("random", randomAsciiOfLength(10))
+ .put("random_data_file_io_exception_rate", 1.0) // Fail completely
+ ));
+
+ // Test restore after index deletion
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+ logger.info("--> restore index after deletion");
+ ListenableActionFuture<RestoreSnapshotResponse> restoreSnapshotResponseFuture =
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute();
+
+ logger.info("--> wait for the index to appear");
+ // the index showing up means that the recovery process has started and is failing
+ assertThat(waitForIndex("test-idx", TimeValue.timeValueSeconds(10)), equalTo(true));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+ logger.info("--> get restore results");
+ // Now read restore results and make sure it failed
+ RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotResponseFuture.actionGet(TimeValue.timeValueSeconds(10));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), greaterThan(0));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(restoreSnapshotResponse.getRestoreInfo().failedShards()));
+
+ logger.info("--> restoring working repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)));
+
+ logger.info("--> trying to restore index again");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
+ ensureGreen();
+ CountResponse countResponse = client.prepareCount("test-idx").get();
+ assertThat(countResponse.getCount(), equalTo(100L));
+ }
+
+ @Test
+ public void unallocatedShardsTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))));
+
+ logger.info("--> creating index that cannot be allocated");
+ prepareCreate("test-idx", 2, ImmutableSettings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + ".tag", "nowhere").put("index.number_of_shards", 3)).get();
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.FAILED));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().reason(), startsWith("Indices don't have primary shards"));
+ }
+
+ @Test
+ public void deleteSnapshotTest() throws Exception {
+ final int numberOfSnapshots = between(5, 15);
+ Client client = client();
+
+ File repo = newTempDir(LifecycleScope.SUITE);
+ logger.info("--> creating repository at " + repo.getAbsolutePath());
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", repo)
+ .put("compress", false)
+ .put("chunk_size", randomIntBetween(100, 1000))));
+
+ createIndex("test-idx");
+ ensureGreen();
+
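+ // Take a snapshot after every batch of 10 documents and record how many files the repository
+ // contains at each point, so that deletions can be verified against these counts later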
+ int[] numberOfFiles = new int[numberOfSnapshots];
+ logger.info("--> creating {} snapshots ", numberOfSnapshots);
+ for (int i = 0; i < numberOfSnapshots; i++) {
+ for (int j = 0; j < 10; j++) {
+ index("test-idx", "doc", Integer.toString(i * 10 + j), "foo", "bar" + i * 10 + j);
+ }
+ refresh();
+ logger.info("--> snapshot {}", i);
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-" + i).setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+ // Store number of files after each snapshot
+ numberOfFiles[i] = numberOfFiles(repo);
+ }
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(10L * numberOfSnapshots));
+ int numberOfFilesBeforeDeletion = numberOfFiles(repo);
+
+ logger.info("--> delete all snapshots except the first one and last one");
+ for (int i = 1; i < numberOfSnapshots - 1; i++) {
+ client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-" + i).get();
+ }
+
+ int numberOfFilesAfterDeletion = numberOfFiles(repo);
+
+ assertThat(numberOfFilesAfterDeletion, lessThan(numberOfFilesBeforeDeletion));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> restore index");
+ String lastSnapshot = "test-snap-" + (numberOfSnapshots - 1);
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", lastSnapshot).setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(10L * numberOfSnapshots));
+
+ logger.info("--> delete the last snapshot");
+ client.admin().cluster().prepareDeleteSnapshot("test-repo", lastSnapshot).get();
+ logger.info("--> make sure that number of files is back to what it was when the first snapshot was made");
+ assertThat(numberOfFiles(repo), equalTo(numberOfFiles[0]));
+ }
+
+ @Test
+ @TestLogging("snapshots:TRACE")
+ public void snapshotClosedIndexTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))));
+
+ createIndex("test-idx", "test-idx-closed");
+ ensureGreen();
+ logger.info("--> closing index test-idx-closed");
+ assertAcked(client.admin().indices().prepareClose("test-idx-closed"));
+ ClusterStateResponse stateResponse = client.admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test-idx-closed").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test-idx-closed"), nullValue());
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx*").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().indices().size(), equalTo(1));
+ assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), equalTo(0));
+
+ logger.info("--> deleting snapshot");
+ client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get();
+ }
+
+ @Test
+ public void renameOnRestoreTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", newTempDir(LifecycleScope.SUITE))));
+
+ createIndex("test-idx-1", "test-idx-2");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
+ index("test-idx-2", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-1", "test-idx-2").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> restore indices with different names");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+ .setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx-1-copy").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2-copy").get().getCount(), equalTo(100L));
+
+ logger.info("--> close just restored indices");
+ client.admin().indices().prepareClose("test-idx-1-copy", "test-idx-2-copy").get();
+
+ logger.info("--> and try to restore these indices again");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+ .setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx-1-copy").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2-copy").get().getCount(), equalTo(100L));
+
+ logger.info("--> close indices");
+ assertAcked(client.admin().indices().prepareClose("test-idx-1", "test-idx-2-copy"));
+
+ logger.info("--> restore indices with different names");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+ .setRenamePattern("(.+-2)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ logger.info("--> try renaming indices using the same name");
+ try {
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRenamePattern("(.+)").setRenameReplacement("same-name").setWaitForCompletion(true).execute().actionGet();
+ fail("Shouldn't be here");
+ } catch (SnapshotRestoreException ex) {
+ // Expected
+ }
+
+ logger.info("--> try renaming indices using the same name");
+ try {
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRenamePattern("test-idx-2").setRenameReplacement("test-idx-1").setWaitForCompletion(true).execute().actionGet();
+ fail("Shouldn't be here");
+ } catch (SnapshotRestoreException ex) {
+ // Expected
+ }
+
+ logger.info("--> try renaming indices using invalid index name");
+ try {
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1").setRenamePattern(".+").setRenameReplacement("__WRONG__").setWaitForCompletion(true).execute().actionGet();
+ fail("Shouldn't be here");
+ } catch (InvalidIndexNameException ex) {
+ // Expected
+ }
+ }
+
+ @Test
+ @TestLogging("cluster.routing.allocation.decider:TRACE")
+ public void moveShardWhileSnapshottingTest() throws Exception {
+ Client client = client();
+ File repositoryLocation = newTempDir(LifecycleScope.TEST);
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("random", randomAsciiOfLength(10))
+ .put("wait_after_unblock", 200)));
+
+ // Create index on 2 nodes and make sure each node has a primary by setting no replicas
+ assertAcked(prepareCreate("test-idx", 2, ImmutableSettings.builder().put("number_of_replicas", 0)));
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ // Pick one node and block it
+ String blockedNode = blockNodeWithIndex("test-idx");
+
+ logger.info("--> snapshot");
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+
+ logger.info("--> waiting for block to kick in");
+ waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
+
+ logger.info("--> execution was blocked on node [{}], moving shards away from this node", blockedNode);
+ ImmutableSettings.Builder excludeSettings = ImmutableSettings.builder().put("index.routing.allocation.exclude._name", blockedNode);
+ client().admin().indices().prepareUpdateSettings("test-idx").setSettings(excludeSettings).get();
+
+ logger.info("--> unblocking blocked node");
+ unblockNode(blockedNode);
+ logger.info("--> waiting for completion");
+ SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
+ logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
+ logger.info("--> done");
+
+ ImmutableList<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots();
+
+ assertThat(snapshotInfos.size(), equalTo(1));
+ assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
+ assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> replace mock repository with real one at the same location");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)));
+
+ logger.info("--> restore index");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+ }
+
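+ // While a snapshot is blocked mid-flight, deleting or re-registering the in-use repository must
+ // fail; registering a repository under a different name is still allowed.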
+ @Test
+ @TestLogging("cluster.routing.allocation.decider:TRACE")
+ public void deleteRepositoryWhileSnapshottingTest() throws Exception {
+ Client client = client();
+ File repositoryLocation = newTempDir(LifecycleScope.TEST);
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ ImmutableSettings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("random", randomAsciiOfLength(10))
+ .put("wait_after_unblock", 200)
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ // Create index on 2 nodes and make sure each node has a primary by setting no replicas
+ assertAcked(prepareCreate("test-idx", 2, ImmutableSettings.builder().put("number_of_replicas", 0)));
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ // Pick one node and block it
+ String blockedNode = blockNodeWithIndex("test-idx");
+
+ logger.info("--> snapshot");
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+
+ logger.info("--> waiting for block to kick in");
+ waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
+
+ logger.info("--> execution was blocked on node [{}], trying to delete repository", blockedNode);
+
+ try {
+ client.admin().cluster().prepareDeleteRepository("test-repo").execute().get();
+ fail("shouldn't be able to delete in-use repository");
+ } catch (Exception ex) {
+ logger.info("--> in-use repository deletion failed");
+ }
+
+ logger.info("--> trying to move repository to another location");
+ try {
+ client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", new File(repositoryLocation, "test"))
+ ).get();
+ fail("shouldn't be able to replace in-use repository");
+ } catch (Exception ex) {
+ logger.info("--> in-use repository replacement failed");
+ }
+
+ logger.info("--> trying to create a repository with different name");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo-2")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", new File(repositoryLocation, "test"))));
+
+ logger.info("--> unblocking blocked node");
+ unblockNode(blockedNode);
+ logger.info("--> waiting for completion");
+ SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
+ logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
+ logger.info("--> done");
+
+ ImmutableList<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots();
+
+ assertThat(snapshotInfos.size(), equalTo(1));
+ assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
+ assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> replace mock repository with real one at the same location");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)));
+
+ logger.info("--> restore index");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+ }
+
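+ // Snapshots into an fs repository, then restores and lists snapshots through a read-only "url"
+ // repository pointing at the same location; the delete still goes through the writable fs repository.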
+ @Test
+ public void urlRepositoryTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ File repositoryLocation = newTempDir(LifecycleScope.SUITE);
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000))));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> create read-only URL repository");
+ assertAcked(client.admin().cluster().preparePutRepository("url-repo")
+ .setType("url").setSettings(ImmutableSettings.settingsBuilder()
+ .put("url", repositoryLocation.toURI().toURL())
+ .put("list_directories", randomBoolean())));
+ logger.info("--> restore index after deletion");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("url-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> list available shapshots");
+ GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get();
+ assertThat(getSnapshotsResponse.getSnapshots(), notNullValue());
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
+
+ logger.info("--> delete snapshot");
+ DeleteSnapshotResponse deleteSnapshotResponse = client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get();
+ assertAcked(deleteSnapshotResponse);
+
+ logger.info("--> list available shapshot again, no snapshots should be returned");
+ getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get();
+ assertThat(getSnapshotsResponse.getSnapshots(), notNullValue());
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(0));
+ }
+
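+ // Randomly enables snapshot and/or restore rate limiting and verifies that throttle time, summed
+ // across the repository instances on all nodes, is non-zero exactly when the corresponding limit was set.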
+ @Test
+ public void throttlingTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ File repositoryLocation = newTempDir(LifecycleScope.SUITE);
+ boolean throttleSnapshot = randomBoolean();
+ boolean throttleRestore = randomBoolean();
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000))
+ .put("max_restore_bytes_per_sec", throttleRestore ? "2.5k" : "0")
+ .put("max_snapshot_bytes_per_sec", throttleSnapshot ? "2.5k" : "0")));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> restore index");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ long snapshotPause = 0L;
+ long restorePause = 0L;
+ for (RepositoriesService repositoriesService : cluster().getInstances(RepositoriesService.class)) {
+ snapshotPause += repositoriesService.repository("test-repo").snapshotThrottleTimeInNanos();
+ restorePause += repositoriesService.repository("test-repo").restoreThrottleTimeInNanos();
+ }
+
+ if (throttleSnapshot) {
+ assertThat(snapshotPause, greaterThan(0L));
+ } else {
+ assertThat(snapshotPause, equalTo(0L));
+ }
+
+ if (throttleRestore) {
+ assertThat(restorePause, greaterThan(0L));
+ } else {
+ assertThat(restorePause, equalTo(0L));
+ }
+ }
+
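+ // Polls until the given index exists or the timeout elapses.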
+ private boolean waitForIndex(String index, TimeValue timeout) throws InterruptedException {
+ long start = System.currentTimeMillis();
+ while (System.currentTimeMillis() - start < timeout.millis()) {
+ if (client().admin().indices().prepareExists(index).execute().actionGet().isExists()) {
+ return true;
+ }
+ Thread.sleep(100);
+ }
+ return false;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java b/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java
new file mode 100644
index 0000000..ee6cefb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+
+/**
+ * Unit tests for {@link SnapshotUtils#filterIndices}.
+ */
+public class SnapshotUtilsTests {
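+ // filterIndices resolves wildcard patterns against the available index names; "+" and "-"
+ // prefixes explicitly include or exclude matches, and lenient options ignore filters that
+ // match nothing instead of failing.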
+ @Test
+ public void testIndexNameFiltering() {
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{}, new String[]{"foo", "bar", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"*"}, new String[]{"foo", "bar", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo", "bar", "baz"}, new String[]{"foo", "bar", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo"}, new String[]{"foo"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"ba*", "-bar", "-baz"}, new String[]{});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"-bar"}, new String[]{"foo", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"-ba*"}, new String[]{"foo"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"+ba*"}, new String[]{"bar", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"+bar", "+foo"}, new String[]{"bar", "foo"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"zzz", "bar"}, IndicesOptions.lenient(), new String[]{"bar"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{""}, IndicesOptions.lenient(), new String[]{});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo", "", "ba*"}, IndicesOptions.lenient(), new String[]{"foo", "bar", "baz"});
+ }
+
+ private void assertIndexNameFiltering(String[] indices, String[] filter, String[] expected) {
+ assertIndexNameFiltering(indices, filter, IndicesOptions.lenient(), expected);
+ }
+
+ private void assertIndexNameFiltering(String[] indices, String[] filter, IndicesOptions indicesOptions, String[] expected) {
+ ImmutableList<String> indicesList = ImmutableList.copyOf(indices);
+ ImmutableList<String> actual = SnapshotUtils.filterIndices(indicesList, filter, indicesOptions);
+ assertThat(actual, containsInAnyOrder(expected));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java b/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java
new file mode 100644
index 0000000..3e80880
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots.mockstore;
+
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+
+/**
+ * A {@link BlobStore} that simply delegates to another instance; base class for mock blob stores.
+ */
+public class BlobStoreWrapper implements BlobStore {
+
+ private BlobStore delegate;
+
+ public BlobStoreWrapper(BlobStore delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public ImmutableBlobContainer immutableBlobContainer(BlobPath path) {
+ return delegate.immutableBlobContainer(path);
+ }
+
+ @Override
+ public void delete(BlobPath path) {
+ delegate.delete(path);
+ }
+
+ @Override
+ public void close() {
+ delegate.close();
+ }
+
+ protected BlobStore delegate() {
+ return delegate;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/mockstore/ImmutableBlobContainerWrapper.java b/src/test/java/org/elasticsearch/snapshots/mockstore/ImmutableBlobContainerWrapper.java
new file mode 100644
index 0000000..d72409e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/mockstore/ImmutableBlobContainerWrapper.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots.mockstore;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * An {@link ImmutableBlobContainer} that simply delegates to another instance; base class for mock containers.
+ */
+public class ImmutableBlobContainerWrapper implements ImmutableBlobContainer {
+ private ImmutableBlobContainer delegate;
+
+ public ImmutableBlobContainerWrapper(ImmutableBlobContainer delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public void writeBlob(String blobName, InputStream is, long sizeInBytes, WriterListener listener) {
+ delegate.writeBlob(blobName, is, sizeInBytes, listener);
+ }
+
+ @Override
+ public void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException {
+ delegate.writeBlob(blobName, is, sizeInBytes);
+ }
+
+ @Override
+ public BlobPath path() {
+ return delegate.path();
+ }
+
+ @Override
+ public boolean blobExists(String blobName) {
+ return delegate.blobExists(blobName);
+ }
+
+ @Override
+ public void readBlob(String blobName, ReadBlobListener listener) {
+ delegate.readBlob(blobName, listener);
+ }
+
+ @Override
+ public byte[] readBlobFully(String blobName) throws IOException {
+ return delegate.readBlobFully(blobName);
+ }
+
+ @Override
+ public boolean deleteBlob(String blobName) throws IOException {
+ return delegate.deleteBlob(blobName);
+ }
+
+ @Override
+ public void deleteBlobsByPrefix(String blobNamePrefix) throws IOException {
+ delegate.deleteBlobsByPrefix(blobNamePrefix);
+ }
+
+ @Override
+ public void deleteBlobsByFilter(BlobNameFilter filter) throws IOException {
+ delegate.deleteBlobsByFilter(filter);
+ }
+
+ @Override
+ public ImmutableMap<String, BlobMetaData> listBlobs() throws IOException {
+ return delegate.listBlobs();
+ }
+
+ @Override
+ public ImmutableMap<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws IOException {
+ return delegate.listBlobsByPrefix(blobNamePrefix);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
new file mode 100644
index 0000000..90c4e39
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
@@ -0,0 +1,316 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots.mockstore;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.repositories.RepositoryName;
+import org.elasticsearch.repositories.RepositorySettings;
+import org.elasticsearch.repositories.fs.FsRepository;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.UnsupportedEncodingException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * An {@link FsRepository} that can inject random I/O failures and block snapshot operations, for testing.
+ */
+public class MockRepository extends FsRepository {
+
+ private final AtomicLong failureCounter = new AtomicLong();
+
+ public void resetFailureCount() {
+ failureCounter.set(0);
+ }
+
+ public long getFailureCount() {
+ return failureCounter.get();
+ }
+
+ private final double randomControlIOExceptionRate;
+
+ private final double randomDataFileIOExceptionRate;
+
+ private final long waitAfterUnblock;
+
+ private final MockBlobStore mockBlobStore;
+
+ private final String randomPrefix;
+
+ private volatile boolean blockOnControlFiles;
+
+ private volatile boolean blockOnDataFiles;
+
+ private volatile boolean blocked = false;
+
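+ // Supported settings (all optional): random_control_io_exception_rate and
+ // random_data_file_io_exception_rate inject deterministic pseudo-random IOExceptions,
+ // block_on_control/block_on_data pause matching operations until unblocked, random seeds the
+ // failure hashing, and wait_after_unblock delays operations (in ms) once a block is released.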
+ @Inject
+ public MockRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository) throws IOException {
+ super(name, repositorySettings, indexShardRepository);
+ randomControlIOExceptionRate = repositorySettings.settings().getAsDouble("random_control_io_exception_rate", 0.0);
+ randomDataFileIOExceptionRate = repositorySettings.settings().getAsDouble("random_data_file_io_exception_rate", 0.0);
+ blockOnControlFiles = repositorySettings.settings().getAsBoolean("block_on_control", false);
+ blockOnDataFiles = repositorySettings.settings().getAsBoolean("block_on_data", false);
+ randomPrefix = repositorySettings.settings().get("random");
+ waitAfterUnblock = repositorySettings.settings().getAsLong("wait_after_unblock", 0L);
+ logger.info("starting mock repository with random prefix " + randomPrefix);
+ mockBlobStore = new MockBlobStore(super.blobStore());
+ }
+
+ private void addFailure() {
+ failureCounter.incrementAndGet();
+ }
+
+ @Override
+ protected void doStop() throws ElasticsearchException {
+ unblock();
+ super.doStop();
+ }
+
+ @Override
+ protected BlobStore blobStore() {
+ return mockBlobStore;
+ }
+
+ public boolean blocked() {
+ return mockBlobStore.blocked();
+ }
+
+ public void unblock() {
+ mockBlobStore.unblockExecution();
+ }
+
+ public void blockOnDataFiles(boolean blocked) {
+ blockOnDataFiles = blocked;
+ }
+
+ public void blockOnControlFiles(boolean blocked) {
+ blockOnControlFiles = blocked;
+ }
+
+ public class MockBlobStore extends BlobStoreWrapper {
+ ConcurrentMap<String, AtomicLong> accessCounts = new ConcurrentHashMap<String, AtomicLong>();
+
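+ // Tracks how many times each blob path was accessed; combined with the random prefix this
+ // makes the pseudo-random failures in shouldFail() reproducible per access.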
+ private long incrementAndGet(String path) {
+ AtomicLong value = accessCounts.get(path);
+ if (value == null) {
+ value = accessCounts.putIfAbsent(path, new AtomicLong(1));
+ }
+ if (value != null) {
+ return value.incrementAndGet();
+ }
+ return 1;
+ }
+
+ public MockBlobStore(BlobStore delegate) {
+ super(delegate);
+ }
+
+ @Override
+ public ImmutableBlobContainer immutableBlobContainer(BlobPath path) {
+ return new MockImmutableBlobContainer(super.immutableBlobContainer(path));
+ }
+
+ public synchronized void unblockExecution() {
+ if (blocked) {
+ blocked = false;
+ // Clear the blocking flags so we don't try to block again
+ blockOnDataFiles = false;
+ blockOnControlFiles = false;
+ this.notifyAll();
+ }
+ }
+
+ public boolean blocked() {
+ return blocked;
+ }
+
+ private synchronized boolean blockExecution() {
+ boolean wasBlocked = false;
+ try {
+ while (blockOnDataFiles || blockOnControlFiles) {
+ blocked = true;
+ this.wait();
+ wasBlocked = true;
+ }
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ }
+ return wasBlocked;
+ }
+
+ private class MockImmutableBlobContainer extends ImmutableBlobContainerWrapper {
+
+ private boolean shouldFail(String blobName, double probability) {
+ if (probability > 0.0) {
+ String path = path().add(blobName).buildAsString("/") + "/" + randomPrefix;
+ path += "/" + incrementAndGet(path);
+ logger.info("checking [{}] [{}]", path, Math.abs(hashCode(path)) < Integer.MAX_VALUE * probability);
+ return Math.abs(hashCode(path)) < Integer.MAX_VALUE * probability;
+ } else {
+ return false;
+ }
+ }
+
+ private int hashCode(String path) {
+ try {
+ digest = MessageDigest.getInstance("MD5");
+ byte[] bytes = digest.digest(path.getBytes("UTF-8"));
+ int i = 0;
+ return ((bytes[i++] & 0xFF) << 24) | ((bytes[i++] & 0xFF) << 16)
+ | ((bytes[i++] & 0xFF) << 8) | (bytes[i++] & 0xFF);
+ } catch (NoSuchAlgorithmException ex) {
+ throw new ElasticsearchException("cannot calculate hashcode", ex);
+ } catch (UnsupportedEncodingException ex) {
+ throw new ElasticsearchException("cannot calculate hashcode", ex);
+ }
+ }
+
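+ // Blob names starting with "__" are shard data files; everything else is treated as a
+ // control (metadata) file with its own failure rate and blocking flag.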
+ private void maybeIOExceptionOrBlock(String blobName) throws IOException {
+ if (blobName.startsWith("__")) {
+ if (shouldFail(blobName, randomDataFileIOExceptionRate)) {
+ logger.info("throwing random IOException for file [{}] at path [{}]", blobName, path());
+ addFailure();
+ throw new IOException("Random IOException");
+ } else if (blockOnDataFiles) {
+ logger.info("blocking I/O operation for file [{}] at path [{}]", blobName, path());
+ if (blockExecution() && waitAfterUnblock > 0) {
+ try {
+ // Delay the operation after unblocking so that node shutdown can start
+ // while this operation is still running.
+ Thread.sleep(waitAfterUnblock);
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
+ } else {
+ if (shouldFail(blobName, randomControlIOExceptionRate)) {
+ logger.info("throwing random IOException for file [{}] at path [{}]", blobName, path());
+ addFailure();
+ throw new IOException("Random IOException");
+ } else if (blockOnControlFiles) {
+ logger.info("blocking I/O operation for file [{}] at path [{}]", blobName, path());
+ if (blockExecution() && waitAfterUnblock > 0) {
+ try {
+ // Delay the operation after unblocking so that node shutdown can start
+ // while this operation is still running.
+ Thread.sleep(waitAfterUnblock);
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
+ }
+ }
+
+ private void maybeIOExceptionOrBlock(String blobName, ImmutableBlobContainer.WriterListener listener) {
+ try {
+ maybeIOExceptionOrBlock(blobName);
+ } catch (IOException ex) {
+ listener.onFailure(ex);
+ }
+ }
+
+ private void maybeIOExceptionOrBlock(String blobName, ImmutableBlobContainer.ReadBlobListener listener) {
+ try {
+ maybeIOExceptionOrBlock(blobName);
+ } catch (IOException ex) {
+ listener.onFailure(ex);
+ }
+ }
+
+ public MockImmutableBlobContainer(ImmutableBlobContainer delegate) {
+ super(delegate);
+ }
+
+ @Override
+ public void writeBlob(String blobName, InputStream is, long sizeInBytes, WriterListener listener) {
+ maybeIOExceptionOrBlock(blobName, listener);
+ super.writeBlob(blobName, is, sizeInBytes, listener);
+ }
+
+ @Override
+ public void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException {
+ maybeIOExceptionOrBlock(blobName);
+ super.writeBlob(blobName, is, sizeInBytes);
+ }
+
+ @Override
+ public boolean blobExists(String blobName) {
+ return super.blobExists(blobName);
+ }
+
+ @Override
+ public void readBlob(String blobName, ReadBlobListener listener) {
+ maybeIOExceptionOrBlock(blobName, listener);
+ super.readBlob(blobName, listener);
+ }
+
+ @Override
+ public byte[] readBlobFully(String blobName) throws IOException {
+ maybeIOExceptionOrBlock(blobName);
+ return super.readBlobFully(blobName);
+ }
+
+ @Override
+ public boolean deleteBlob(String blobName) throws IOException {
+ maybeIOExceptionOrBlock(blobName);
+ return super.deleteBlob(blobName);
+ }
+
+ @Override
+ public void deleteBlobsByPrefix(String blobNamePrefix) throws IOException {
+ maybeIOExceptionOrBlock(blobNamePrefix);
+ super.deleteBlobsByPrefix(blobNamePrefix);
+ }
+
+ @Override
+ public void deleteBlobsByFilter(BlobNameFilter filter) throws IOException {
+ maybeIOExceptionOrBlock("");
+ super.deleteBlobsByFilter(filter);
+ }
+
+ @Override
+ public ImmutableMap<String, BlobMetaData> listBlobs() throws IOException {
+ maybeIOExceptionOrBlock("");
+ return super.listBlobs();
+ }
+
+ @Override
+ public ImmutableMap<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws IOException {
+ maybeIOExceptionOrBlock(blobNamePrefix);
+ return super.listBlobsByPrefix(blobNamePrefix);
+ }
+
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryModule.java b/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryModule.java
new file mode 100644
index 0000000..0da50f1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryModule.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots.mockstore;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository;
+import org.elasticsearch.repositories.Repository;
+
+/**
+ * Guice module that binds {@link MockRepository} as the {@link Repository} implementation.
+ */
+public class MockRepositoryModule extends AbstractModule {
+
+ public MockRepositoryModule() {
+ super();
+ }
+
+ @Override
+ protected void configure() {
+ bind(Repository.class).to(MockRepository.class).asEagerSingleton();
+ bind(IndexShardRepository.class).to(BlobStoreIndexShardRepository.class).asEagerSingleton();
+ }
+
+}
+
diff --git a/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java b/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java
new file mode 100644
index 0000000..86ae039
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.client;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Stress test that restarts nodes one by one while a {@link TransportClient} keeps indexing through the failovers.
+ */
+public class ClientFailover {
+
+ public static void main(String[] args) throws Exception {
+ Node[] nodes = new Node[3];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().node();
+ }
+
+ final TransportClient client = new TransportClient()
+ .addTransportAddress(new InetSocketTransportAddress("localhost", 9300))
+ .addTransportAddress(new InetSocketTransportAddress("localhost", 9301))
+ .addTransportAddress(new InetSocketTransportAddress("localhost", 9302));
+
+ final AtomicBoolean done = new AtomicBoolean();
+ final AtomicLong indexed = new AtomicLong();
+ final CountDownLatch latch = new CountDownLatch(1);
+ Thread indexer = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ while (!done.get()) {
+ try {
+ client.prepareIndex("test", "type").setSource("field", "value").execute().actionGet();
+ indexed.incrementAndGet();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ latch.countDown();
+ }
+ });
+ indexer.start();
+
+ for (int i = 0; i < 100; i++) {
+ int index = i % nodes.length;
+ nodes[index].close();
+
+ ClusterHealthResponse health = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ if (health.isTimedOut()) {
+ System.err.println("timed out on health");
+ }
+
+ nodes[index] = NodeBuilder.nodeBuilder().node();
+
+ health = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ if (health.isTimedOut()) {
+ System.err.println("timed out on health");
+ }
+ }
+
+ // Stop the indexer before awaiting it; otherwise the latch would never be counted down.
+ done.set(true);
+ latch.await();
+
+ // TODO: add verification of the number of indexed docs
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java b/src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java
new file mode 100644
index 0000000..8885d09
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.fullrestart;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.node.internal.InternalNode;
+
+import java.io.File;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+/**
+ * Stress test that repeatedly restarts the whole cluster, verifying that document counts survive each full restart.
+ */
+public class FullRestartStressTest {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ private int numberOfNodes = 4;
+
+ private boolean clearNodeWork = false;
+
+ private int numberOfIndices = 5;
+ private int textTokens = 150;
+ private int numberOfFields = 10;
+ private int bulkSize = 1000;
+ private int numberOfDocsPerRound = 50000;
+
+ private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+ private TimeValue period = TimeValue.timeValueMinutes(20);
+
+ private AtomicLong indexCounter = new AtomicLong();
+
+ public FullRestartStressTest numberOfNodes(int numberOfNodes) {
+ this.numberOfNodes = numberOfNodes;
+ return this;
+ }
+
+ public FullRestartStressTest numberOfIndices(int numberOfIndices) {
+ this.numberOfIndices = numberOfIndices;
+ return this;
+ }
+
+ public FullRestartStressTest textTokens(int textTokens) {
+ this.textTokens = textTokens;
+ return this;
+ }
+
+ public FullRestartStressTest numberOfFields(int numberOfFields) {
+ this.numberOfFields = numberOfFields;
+ return this;
+ }
+
+ public FullRestartStressTest bulkSize(int bulkSize) {
+ this.bulkSize = bulkSize;
+ return this;
+ }
+
+ public FullRestartStressTest numberOfDocsPerRound(int numberOfDocsPerRound) {
+ this.numberOfDocsPerRound = numberOfDocsPerRound;
+ return this;
+ }
+
+ public FullRestartStressTest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ public FullRestartStressTest period(TimeValue period) {
+ this.period = period;
+ return this;
+ }
+
+ public FullRestartStressTest clearNodeWork(boolean clearNodeWork) {
+ this.clearNodeWork = clearNodeWork;
+ return this;
+ }
+
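+ // Each round: start a fresh set of nodes, verify that both count and search totals still match
+ // the number of docs indexed so far, bulk-index another batch, snapshot the gateway, then shut
+ // every node down again.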
+ public void run() throws Exception {
+ long numberOfRounds = 0;
+ Random random = new Random(0);
+ long testStart = System.currentTimeMillis();
+ while (true) {
+ Node[] nodes = new Node[numberOfNodes];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+ Node client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();
+
+ // verify that the indices are there
+ for (int i = 0; i < numberOfIndices; i++) {
+ try {
+ client.client().admin().indices().prepareCreate("test" + i).execute().actionGet();
+ } catch (Exception e) {
+ // might already exist, fine
+ }
+ }
+
+ logger.info("*** Waiting for GREEN status");
+ try {
+ ClusterHealthResponse clusterHealth = client.client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealth.isTimedOut()) {
+ logger.warn("timed out waiting for green status....");
+ }
+ } catch (Exception e) {
+ logger.warn("failed to execute cluster health....");
+ }
+
+ CountResponse count = client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
+ logger.info("*** index_count [{}], expected_count [{}]", count.getCount(), indexCounter.get());
+ // verify count
+ for (int i = 0; i < (nodes.length * 5); i++) {
+ count = client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
+ logger.debug("index_count [{}], expected_count [{}]", count.getCount(), indexCounter.get());
+ if (count.getCount() != indexCounter.get()) {
+ logger.warn("!!! count does not match, index_count [{}], expected_count [{}]", count.getCount(), indexCounter.get());
+ throw new Exception("failed test, count does not match...");
+ }
+ }
+
+ // verify search
+ for (int i = 0; i < (nodes.length * 5); i++) {
+ // search on a field that has norms, so we don't just hit the match_all filter cache
+ SearchResponse search = client.client().prepareSearch().setQuery(matchAllQuery().normsField("field")).execute().actionGet();
+ logger.debug("search_count [{}], expected_count [{}]", search.getHits().totalHits(), indexCounter.get());
+ if (search.getHits().totalHits() != indexCounter.get()) {
+ logger.warn("!!! search does not match, search_count [{}], expected_count [{}]", search.getHits().totalHits(), indexCounter.get());
+ throw new Exception("failed test, search count does not match...");
+ }
+ }
+
+ logger.info("*** ROUND {}", ++numberOfRounds);
+ // bulk index data
+ int numberOfBulks = numberOfDocsPerRound / bulkSize;
+ for (int b = 0; b < numberOfBulks; b++) {
+ BulkRequestBuilder bulk = client.client().prepareBulk();
+ for (int k = 0; k < bulkSize; k++) {
+ StringBuilder sb = new StringBuilder();
+ XContentBuilder json = XContentFactory.jsonBuilder().startObject()
+ .field("field", "value" + ThreadLocalRandom.current().nextInt());
+
+ int fields = ThreadLocalRandom.current().nextInt(numberOfFields);
+ for (int i = 0; i < fields; i++) {
+ json.field("num_" + i, ThreadLocalRandom.current().nextDouble());
+ int tokens = ThreadLocalRandom.current().nextInt(textTokens);
+ sb.setLength(0);
+ for (int j = 0; j < tokens; j++) {
+ sb.append(Strings.randomBase64UUID(random)).append(' ');
+ }
+ json.field("text_" + i, sb.toString());
+ }
+
+ json.endObject();
+
+ bulk.add(Requests.indexRequest("test" + (Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfIndices)).type("type1").source(json));
+ indexCounter.incrementAndGet();
+ }
+ bulk.execute().actionGet();
+ }
+
+ client.client().admin().indices().prepareGatewaySnapshot().execute().actionGet();
+
+ client.close();
+ for (Node node : nodes) {
+ File[] nodeDatas = ((InternalNode) node).injector().getInstance(NodeEnvironment.class).nodeDataLocations();
+ node.close();
+ if (clearNodeWork && !settings.get("gateway.type").equals("local")) {
+ FileSystemUtils.deleteRecursively(nodeDatas);
+ }
+ }
+
+ if ((System.currentTimeMillis() - testStart) > period.millis()) {
+ logger.info("test finished, full_restart_rounds [{}]", numberOfRounds);
+ break;
+ }
+
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ int numberOfNodes = 2;
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.shard.check_on_startup", true)
+ .put("gateway.type", "local")
+ .put("gateway.recover_after_nodes", numberOfNodes)
+ .put("index.number_of_shards", 1)
+ .put("path.data", "data/data1,data/data2")
+ .build();
+
+ FullRestartStressTest test = new FullRestartStressTest()
+ .settings(settings)
+ .period(TimeValue.timeValueMinutes(20))
+ .clearNodeWork(false) // only applies to shared gateway
+ .numberOfNodes(numberOfNodes)
+ .numberOfIndices(1)
+ .textTokens(150)
+ .numberOfFields(10)
+ .bulkSize(1000)
+ .numberOfDocsPerRound(10000);
+
+ test.run();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/gcbehavior/FilterCacheGcStress.java b/src/test/java/org/elasticsearch/stresstest/gcbehavior/FilterCacheGcStress.java
new file mode 100644
index 0000000..67a092b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/gcbehavior/FilterCacheGcStress.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.gcbehavior;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.index.query.FilterBuilders.rangeFilter;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
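+/**
+ * Stress test that keeps indexing timestamps while running fresh range-filtered searches, to observe
+ * filter cache churn and GC behavior.
+ */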
+public class FilterCacheGcStress {
+
+ public static void main(String[] args) {
+
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("gateway.type", "none")
+ .build();
+
+ Node node = NodeBuilder.nodeBuilder().settings(settings).node();
+ final Client client = node.client();
+
+ client.admin().indices().prepareCreate("test").execute().actionGet();
+ client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet();
+
+ final AtomicBoolean stop = new AtomicBoolean();
+
+ Thread indexingThread = new Thread() {
+ @Override
+ public void run() {
+ while (!stop.get()) {
+ client.prepareIndex("test", "type1").setSource("field", System.currentTimeMillis()).execute().actionGet();
+ }
+ }
+ };
+ indexingThread.start();
+
+ Thread searchThread = new Thread() {
+ @Override
+ public void run() {
+ while (!stop.get()) {
+ client.prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), rangeFilter("field").from(System.currentTimeMillis() - 1000000)))
+ .execute().actionGet();
+ }
+ }
+ };
+
+ searchThread.start();
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/stresstest/get/GetStressTest.java b/src/test/java/org/elasticsearch/stresstest/get/GetStressTest.java
new file mode 100644
index 0000000..b1c24c2
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/get/GetStressTest.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.get;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
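+/**
+ * Stress test that indexes a document and immediately gets it back by id from many threads,
+ * reporting any id that cannot be found.
+ */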
+public class GetStressTest {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .build();
+
+ final int NUMBER_OF_NODES = 2;
+ final int NUMBER_OF_THREADS = 50;
+ final TimeValue TEST_TIME = TimeValue.parseTimeValue("10m", null);
+
+ Node[] nodes = new Node[NUMBER_OF_NODES];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+
+ final Node client = NodeBuilder.nodeBuilder()
+ .settings(settings)
+ .client(true)
+ .node();
+
+ client.client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ final AtomicBoolean done = new AtomicBoolean();
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong counter = new AtomicLong();
+
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ ThreadLocalRandom random = ThreadLocalRandom.current();
+ while (!done.get()) {
+ String id = String.valueOf(idGenerator.incrementAndGet());
+ client.client().prepareIndex("test", "type1", id)
+ .setSource("field", random.nextInt(100))
+ .execute().actionGet();
+
+ GetResponse getResponse = client.client().prepareGet("test", "type1", id)
+ //.setFields(Strings.EMPTY_ARRAY)
+ .execute().actionGet();
+ if (!getResponse.isExists()) {
+ System.err.println("Failed to find " + id);
+ }
+
+ long count = counter.incrementAndGet();
+ if ((count % 10000) == 0) {
+ System.out.println("Executed " + count);
+ }
+ }
+ }
+ });
+ }
+ for (Thread thread : threads) {
+ thread.start();
+ }
+
+ Thread.sleep(TEST_TIME.millis());
+
+ System.out.println("test done.");
+ done.set(true);
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/stresstest/get/MGetStress1.java b/src/test/java/org/elasticsearch/stresstest/get/MGetStress1.java
new file mode 100644
index 0000000..11092ca
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/get/MGetStress1.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.get;
+
+import com.google.common.collect.Sets;
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.get.MultiGetItemResponse;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Stress test for multi-get correctness while the same ids are concurrently reindexed.
+ */
+public class MGetStress1 {
+
+ public static void main(String[] args) throws Exception {
+ final int NUMBER_OF_NODES = 2;
+ final int NUMBER_OF_DOCS = 50000;
+ final int MGET_BATCH = 1000;
+
+ Node[] nodes = new Node[NUMBER_OF_NODES];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().node();
+ }
+
+ System.out.println("---> START Indexing initial data [" + NUMBER_OF_DOCS + "]");
+ final Client client = nodes[0].client();
+ for (int i = 0; i < NUMBER_OF_DOCS; i++) {
+ client.prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ System.out.println("---> DONE Indexing initial data [" + NUMBER_OF_DOCS + "]");
+
+ final AtomicBoolean done = new AtomicBoolean();
+ // start indexer
+ Thread indexer = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ while (!done.get()) {
+ client.prepareIndex("test", "type", Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS)))
+ .setSource("field", "value").execute().actionGet();
+ }
+ }
+ });
+ indexer.start();
+ System.out.println("---> Starting indexer");
+
+ // start the mget one
+ Thread mget = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ while (!done.get()) {
+ Set<String> ids = Sets.newHashSet();
+ for (int i = 0; i < MGET_BATCH; i++) {
+ ids.add(Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS)));
+ }
+ //System.out.println("---> mget for [" + ids.size() + "]");
+ MultiGetResponse response = client.prepareMultiGet().add("test", "type", ids).execute().actionGet();
+ int expected = ids.size();
+ int count = 0;
+ for (MultiGetItemResponse item : response) {
+ count++;
+ if (item.isFailed()) {
+ System.err.println("item failed... " + item.getFailure());
+ } else {
+ boolean removed = ids.remove(item.getId());
+ if (!removed) {
+ System.err.println("got id twice " + item.getId());
+ }
+ }
+ }
+ if (expected != count) {
+ System.err.println("Expected [" + expected + "], got back [" + count + "]");
+ }
+ }
+ }
+ });
+ mget.start();
+ System.out.println("---> Starting mget");
+
+ Thread.sleep(TimeValue.timeValueMinutes(10).millis());
+
+ done.set(true);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/indexing/BulkIndexingStressTest.java b/src/test/java/org/elasticsearch/stresstest/indexing/BulkIndexingStressTest.java
new file mode 100644
index 0000000..2b84fb0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/indexing/BulkIndexingStressTest.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.indexing;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+/**
+ * Stress test that continuously bulk-indexes small batches spread across many indices, failing fast on any bulk item failure.
+ */
+public class BulkIndexingStressTest {
+
+ public static void main(String[] args) {
+ final int NUMBER_OF_NODES = 4;
+ final int NUMBER_OF_INDICES = 600;
+ final int BATCH = 300;
+
+ final Settings nodeSettings = ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2).build();
+
+// ESLogger logger = Loggers.getLogger("org.elasticsearch");
+// logger.setLevel("DEBUG");
+ Node[] nodes = new Node[NUMBER_OF_NODES];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(nodeSettings).node();
+ }
+
+ Client client = nodes.length == 1 ? nodes[0].client() : nodes[1].client();
+
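+ // index forever: each pass sends one bulk request of BATCH docs spread across
+ // random indices, and fails fast on the first bulk error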
+ while (true) {
+ BulkRequestBuilder bulkRequest = client.prepareBulk();
+ for (int i = 0; i < BATCH; i++) {
+ bulkRequest.add(Requests.indexRequest("test" + ThreadLocalRandom.current().nextInt(NUMBER_OF_INDICES)).type("type").source("field", "value"));
+ }
+ BulkResponse bulkResponse = bulkRequest.execute().actionGet();
+ if (bulkResponse.hasFailures()) {
+ for (BulkItemResponse item : bulkResponse) {
+ if (item.isFailed()) {
+ System.out.println("failed response:" + item.getFailureMessage());
+ }
+ }
+
+ throw new RuntimeException("Failed responses");
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/indexing/ConcurrentIndexingVersioningStressTest.java b/src/test/java/org/elasticsearch/stresstest/indexing/ConcurrentIndexingVersioningStressTest.java
new file mode 100644
index 0000000..15d324c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/indexing/ConcurrentIndexingVersioningStressTest.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.indexing;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+
+import java.util.concurrent.CountDownLatch;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Checks that the index operation does not create duplicate documents.
+ */
+public class ConcurrentIndexingVersioningStressTest {
+
+ public static void main(String[] args) throws Exception {
+
+ Settings settings = settingsBuilder()
+ .put("gateway.type", "none")
+ .build();
+
+ Node node1 = nodeBuilder().settings(settings).node();
+ Node node2 = nodeBuilder().settings(settings).node();
+ final Node client = nodeBuilder().settings(settings).client(true).node();
+
+ final int NUMBER_OF_DOCS = 10000;
+ final int NUMBER_OF_THREADS = 10;
+ final long NUMBER_OF_ITERATIONS = SizeValue.parseSizeValue("10k").singles();
+ final long DELETE_EVERY = 10;
+
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ if ((i % DELETE_EVERY) == 0) {
+ client.client().prepareDelete("test", "type1", Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS))).execute().actionGet();
+ } else {
+ client.client().prepareIndex("test", "type1", Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS))).setSource("field1", "value1").execute().actionGet();
+ }
+ }
+ } finally {
+ latch.countDown();
+ }
+ }
+ };
+ }
+
+ for (Thread thread : threads) {
+ thread.start();
+ }
+
+ latch.await();
+ System.out.println("done indexing, verifying docs");
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
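+ // verify every possible id: a search on _id should never return more than one hit,
+ // and repeated gets should agree on existence and version now that indexing has stopped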
+ for (int i = 0; i < NUMBER_OF_DOCS; i++) {
+ String id = Integer.toString(i);
+ for (int j = 0; j < 5; j++) {
+ SearchResponse response = client.client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", id)).execute().actionGet();
+ if (response.getHits().totalHits() > 1) {
+ System.err.println("[" + i + "] FAIL, HITS [" + response.getHits().totalHits() + "]");
+ }
+ }
+ GetResponse getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
+ if (getResponse.isExists()) {
+ long version = getResponse.getVersion();
+ for (int j = 0; j < 5; j++) {
+ getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
+ if (!getResponse.isExists()) {
+ System.err.println("[" + i + "] FAIL, EXISTED, and NOT_EXISTED");
+ break;
+ }
+ if (version != getResponse.getVersion()) {
+ System.err.println("[" + i + "] FAIL, DIFFERENT VERSIONS: [" + version + "], [" + getResponse.getVersion() + "]");
+ break;
+ }
+ }
+ } else {
+ for (int j = 0; j < 5; j++) {
+ getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
+ if (getResponse.isExists()) {
+ System.err.println("[" + i + "] FAIL, EXISTED, and NOT_EXISTED");
+ break;
+ }
+ }
+ }
+ }
+ System.out.println("done.");
+
+ client.close();
+ node1.close();
+ node2.close();
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/stresstest/leaks/GenericStatsLeak.java b/src/test/java/org/elasticsearch/stresstest/leaks/GenericStatsLeak.java
new file mode 100644
index 0000000..6bf78d5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/leaks/GenericStatsLeak.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.leaks;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.monitor.jvm.JvmService;
+import org.elasticsearch.monitor.network.NetworkService;
+import org.elasticsearch.monitor.os.OsService;
+import org.elasticsearch.monitor.process.ProcessService;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.node.internal.InternalNode;
+
+public class GenericStatsLeak {
+
+ public static void main(String[] args) {
+ InternalNode node = (InternalNode) NodeBuilder.nodeBuilder().settings(ImmutableSettings.settingsBuilder()
+ .put("monitor.os.refresh_interval", 0)
+ .put("monitor.process.refresh_interval", 0)
+ .put("monitor.network.refresh_interval", 0)
+ ).node();
+
+ JvmService jvmService = node.injector().getInstance(JvmService.class);
+ OsService osService = node.injector().getInstance(OsService.class);
+ ProcessService processService = node.injector().getInstance(ProcessService.class);
+ NetworkService networkService = node.injector().getInstance(NetworkService.class);
+
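+ // with the refresh intervals set to 0, every stats() call presumably bypasses the
+ // cached values and hits the underlying probes, so this tight loop surfaces leaks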
+ while (true) {
+ jvmService.stats();
+ osService.stats();
+ processService.stats();
+ networkService.stats();
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/stresstest/leaks/JvmStatsLeak.java b/src/test/java/org/elasticsearch/stresstest/leaks/JvmStatsLeak.java
new file mode 100644
index 0000000..e558b47
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/leaks/JvmStatsLeak.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.leaks;
+
+import org.elasticsearch.monitor.jvm.JvmStats;
+
+/**
+ * This test mainly comes to check the native memory leak with getLastGCInfo (which is now
+ * disabled by default).
+ */
+public class JvmStatsLeak {
+
+ public static void main(String[] args) {
+ while (true) {
+ JvmStats.jvmStats();
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java
new file mode 100644
index 0000000..8a20dca
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.manyindices;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.Date;
+
+/**
+ *
+ */
+public class ManyIndicesRemoteStressTest {
+
+ private static final ESLogger logger = Loggers.getLogger(ManyIndicesRemoteStressTest.class);
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ int numberOfShards = 1;
+ int numberOfReplicas = 1;
+ int numberOfIndices = 1000;
+ int numberOfDocs = 1;
+
+ Client client;
+ Node node = null;
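+ // hard-coded toggle: true connects to an external node via the transport client,
+ // false starts an in-JVM client node instead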
+ if (true) {
+ client = new TransportClient().addTransportAddress(new InetSocketTransportAddress("localhost", 9300));
+ } else {
+ node = NodeBuilder.nodeBuilder().client(true).node();
+ client = node.client();
+ }
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ logger.info("START index [{}] ...", i);
+ client.admin().indices().prepareCreate("index_" + i)
+ .setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", numberOfReplicas))
+ .execute().actionGet();
+
+ for (int j = 0; j < numberOfDocs; j++) {
+ client.prepareIndex("index_" + i, "type").setSource("field1", "test", "field2", 2, "field3", new Date()).execute().actionGet();
+ }
+ logger.info("DONE index [{}]", i);
+ }
+
+ client.admin().indices().prepareGatewaySnapshot().execute().actionGet();
+
+ logger.info("closing node...");
+ if (node != null) {
+ node.close();
+ }
+ logger.info("node closed");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java
new file mode 100644
index 0000000..4d437bc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.manyindices;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.Date;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+/**
+ *
+ */
+public class ManyIndicesStressTest {
+
+ private static final ESLogger logger = Loggers.getLogger(ManyIndicesStressTest.class);
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ int numberOfIndices = 100;
+ int numberOfDocs = 100;
+
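+ // the local gateway persists index data across the full node restart below,
+ // which is what this test exercises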
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("index.shard.check_on_startup", false)
+ .put("gateway.type", "local")
+ .put("index.number_of_shards", 1)
+ .build();
+ Node node = NodeBuilder.nodeBuilder().settings(settings).node();
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ logger.info("START index [{}] ...", i);
+ node.client().admin().indices().prepareCreate("index_" + i).execute().actionGet();
+
+ for (int j = 0; j < numberOfDocs; j++) {
+ node.client().prepareIndex("index_" + i, "type").setSource("field1", "test", "field2", 2, "field3", new Date()).execute().actionGet();
+ }
+ logger.info("DONE index [{}] ...", i);
+ }
+
+ node.client().admin().indices().prepareGatewaySnapshot().execute().actionGet();
+
+ logger.info("closing node...");
+ node.close();
+ logger.info("node closed");
+
+ logger.info("starting node...");
+ node = NodeBuilder.nodeBuilder().settings(settings).node();
+
+ ClusterHealthResponse health = node.client().admin().cluster().prepareHealth().setTimeout("5m").setWaitForYellowStatus().execute().actionGet();
+ logger.info("health: " + health.getStatus());
+ logger.info("active shards: " + health.getActiveShards());
+ logger.info("active primary shards: " + health.getActivePrimaryShards());
+ if (health.isTimedOut()) {
+ logger.error("Timed out on health...");
+ }
+
+ ClusterState clusterState = node.client().admin().cluster().prepareState().execute().actionGet().getState();
+ for (int i = 0; i < numberOfIndices; i++) {
+ if (clusterState.blocks().indices().containsKey("index_" + i)) {
+ logger.error("index [{}] has blocks: {}", i, clusterState.blocks().indices().get("index_" + i));
+ }
+ }
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ long count = node.client().prepareCount("index_" + i).setQuery(matchAllQuery()).execute().actionGet().getCount();
+ if (count == numberOfDocs) {
+ logger.info("VERIFIED [{}], count [{}]", i, count);
+ } else {
+ logger.error("FAILED [{}], expected [{}], got [{}]", i, numberOfDocs, count);
+ }
+ }
+
+ logger.info("closing node...");
+ node.close();
+ logger.info("node closed");
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java
new file mode 100644
index 0000000..45cbd02
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.manyindices;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.List;
+
+public class ManyNodesManyIndicesRecoveryStressTest {
+
+ public static void main(String[] args) throws Exception {
+ final int NUM_NODES = 40;
+ final int NUM_INDICES = 100;
+ final int NUM_DOCS = 2;
+ final int FLUSH_AFTER = 1;
+
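+ // a single high-priority transport channel per node pair, presumably to stress
+ // recovery over a minimal number of connections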
+ final Settings nodeSettings = ImmutableSettings.settingsBuilder()
+ .put("transport.netty.connections_per_node.low", 0)
+ .put("transport.netty.connections_per_node.med", 0)
+ .put("transport.netty.connections_per_node.high", 1)
+ .build();
+
+ final Settings indexSettings = ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .build();
+
+ List<Node> nodes = Lists.newArrayList();
+ for (int i = 0; i < NUM_NODES; i++) {
+ nodes.add(NodeBuilder.nodeBuilder().settings(ImmutableSettings.settingsBuilder().put(nodeSettings).put("name", "node" + i)).node());
+ }
+ Client client = nodes.get(0).client();
+
+ for (int index = 0; index < NUM_INDICES; index++) {
+ String indexName = "index_" + index;
+ System.out.println("--> Processing index [" + indexName + "]...");
+ client.admin().indices().prepareCreate(indexName).setSettings(indexSettings).execute().actionGet();
+
+ boolean flushed = false;
+ for (int doc = 0; doc < NUM_DOCS; doc++) {
+ if (!flushed && doc > FLUSH_AFTER) {
+ flushed = true;
+ client.admin().indices().prepareFlush(indexName).execute().actionGet();
+ }
+ client.prepareIndex(indexName, "type1", Integer.toString(doc)).setSource("field", "value" + doc).execute().actionGet();
+ }
+ System.out.println("--> DONE index [" + indexName + "]");
+ }
+
+ System.out.println("--> Initiating shutdown");
+ client.admin().cluster().prepareNodesShutdown().setExit(false).execute().actionGet();
+
+ System.out.println("--> Waiting for all nodes to be closed...");
+ while (true) {
+ boolean allAreClosed = true;
+ for (Node node : nodes) {
+ if (!node.isClosed()) {
+ allAreClosed = false;
+ break;
+ }
+ }
+ if (allAreClosed) {
+ break;
+ }
+ Thread.sleep(100);
+ }
+ System.out.println("Waiting a bit for node lock to really be released?");
+ Thread.sleep(5000);
+ System.out.println("--> All nodes are closed, starting back...");
+
+ nodes = Lists.newArrayList();
+ for (int i = 0; i < NUM_NODES; i++) {
+ nodes.add(NodeBuilder.nodeBuilder().settings(ImmutableSettings.settingsBuilder().put(nodeSettings).put("name", "node" + i)).node());
+ }
+ client = nodes.get(0).client();
+
+ System.out.println("--> Waiting for green status");
+ while (true) {
+ ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealth.isTimedOut()) {
+ System.err.println("--> cluster health timed out..., active shards [" + clusterHealth.getActiveShards() + "]");
+ } else {
+ break;
+ }
+ }
+
+ System.out.println("Verifying counts...");
+ for (int index = 0; index < NUM_INDICES; index++) {
+ String indexName = "index_" + index;
+ CountResponse count = client.prepareCount(indexName).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ if (count.getCount() != NUM_DOCS) {
+ System.err.println("Wrong count value, expected [" + NUM_DOCS + "], got [" + count.getCount() + "] for index [" + indexName + "]");
+ }
+ }
+
+ System.out.println("Test end");
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/stresstest/refresh/RefreshStressTest1.java b/src/test/java/org/elasticsearch/stresstest/refresh/RefreshStressTest1.java
new file mode 100644
index 0000000..3934b38
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/refresh/RefreshStressTest1.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.refresh;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.UUID;
+
+/**
+ */
+public class RefreshStressTest1 {
+
+ public static void main(String[] args) throws InterruptedException {
+ int numberOfShards = 5;
+ Node node = NodeBuilder.nodeBuilder().local(true).loadConfigSettings(false).clusterName("testCluster").settings(
+ ImmutableSettings.settingsBuilder()
+ .put("node.name", "node1")
+ .put("gateway.type", "none")
+ .put("index.number_of_shards", numberOfShards)
+ //.put("path.data", new File("target/data").getAbsolutePath())
+ .build()).node();
+ Node node2 = NodeBuilder.nodeBuilder().local(true).loadConfigSettings(false).clusterName("testCluster").settings(
+ ImmutableSettings.settingsBuilder()
+ .put("node.name", "node2")
+ .put("gateway.type", "none")
+ .put("index.number_of_shards", numberOfShards)
+ //.put("path.data", new File("target/data").getAbsolutePath())
+ .build()).node();
+ Client client = node.client();
+
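+ // each iteration creates a fresh index, indexes one doc, refreshes, and expects the
+ // doc to be searchable immediately; the retry loop below surfaces refresh visibility bugs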
+ for (int loop = 1; loop < 1000; loop++) {
+ String indexName = "testindex" + loop;
+ String typeName = "testType" + loop;
+ String id = UUID.randomUUID().toString();
+ String mapping = "{ \"" + typeName + "\" : {\"dynamic_templates\" : [{\"no_analyze_strings\" : {\"match_mapping_type\" : \"string\",\"match\" : \"*\",\"mapping\" : {\"type\" : \"string\",\"index\" : \"not_analyzed\"}}}]}}";
+ client.admin().indices().prepareCreate(indexName).execute().actionGet();
+ client.admin().indices().preparePutMapping(indexName).setType(typeName).setSource(mapping).execute().actionGet();
+// sleep after put mapping
+// Thread.sleep(100);
+
+ System.out.println("indexing " + loop);
+ String name = "name" + id;
+ client.prepareIndex(indexName, typeName, id).setSource("{ \"id\": \"" + id + "\", \"name\": \"" + name + "\" }").execute().actionGet();
+
+ client.admin().indices().prepareRefresh(indexName).execute().actionGet();
+// sleep after refresh
+// Thread.sleep(100);
+
+ System.out.println("searching " + loop);
+ SearchResponse result = client.prepareSearch(indexName).setPostFilter(FilterBuilders.termFilter("name", name)).execute().actionGet();
+ if (result.getHits().hits().length != 1) {
+ for (int i = 1; i <= 100; i++) {
+ System.out.println("retry " + loop + ", " + i + ", previous total hits: " + result.getHits().getTotalHits());
+ client.admin().indices().prepareRefresh(indexName).execute().actionGet();
+ Thread.sleep(100);
+ result = client.prepareSearch(indexName).setPostFilter(FilterBuilders.termFilter("name", name)).execute().actionGet();
+ if (result.getHits().hits().length == 1) {
+ client.admin().indices().prepareRefresh(indexName).execute().actionGet();
+ result = client.prepareSearch(indexName).setPostFilter(FilterBuilders.termFilter("name", name)).execute().actionGet();
+ throw new RuntimeException("Record found after " + (i * 100) + " ms, second go: " + result.getHits().hits().length);
+ } else if (i == 100) {
+ if (client.prepareGet(indexName, typeName, id).execute().actionGet().isExists()) {
+ throw new RuntimeException("Record wasn't found after 10s but can be fetched by id");
+ } else {
+ throw new RuntimeException("Record wasn't found after 10s and can't be fetched by id");
+ }
+ }
+ }
+ }
+
+ //client.admin().indices().prepareDelete(indexName).execute().actionGet();
+ }
+ client.close();
+ node2.close();
+ node.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/rollingrestart/QuickRollingRestartStressTest.java b/src/test/java/org/elasticsearch/stresstest/rollingrestart/QuickRollingRestartStressTest.java
new file mode 100644
index 0000000..34e75bb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/rollingrestart/QuickRollingRestartStressTest.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.rollingrestart;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.Date;
+import java.util.Random;
+
+/**
+ */
+public class QuickRollingRestartStressTest {
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ Random random = new Random();
+
+ Settings settings = ImmutableSettings.settingsBuilder().build();
+
+ Node[] nodes = new Node[5];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+
+ Node client = NodeBuilder.nodeBuilder().client(true).node();
+
+ long COUNT;
+ if (client.client().admin().indices().prepareExists("test").execute().actionGet().isExists()) {
+ ClusterHealthResponse clusterHealthResponse = client.client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ throw new ElasticsearchException("failed to wait for green state on startup...");
+ }
+ COUNT = client.client().prepareCount().execute().actionGet().getCount();
+ System.out.println("--> existing index, count [" + COUNT + "]");
+ } else {
+ COUNT = SizeValue.parseSizeValue("100k").singles();
+ System.out.println("--> indexing data...");
+ for (long i = 0; i < COUNT; i++) {
+ client.client().prepareIndex("test", "type", Long.toString(i))
+ .setSource("date", new Date(), "data", RandomStrings.randomAsciiOfLength(random, 10000))
+ .execute().actionGet();
+ }
+ System.out.println("--> done indexing data [" + COUNT + "]");
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ long count = client.client().prepareCount().execute().actionGet().getCount();
+ if (COUNT != count) {
+ System.err.println("--> the indexed docs do not match the count..., got [" + count + "], expected [" + COUNT + "]");
+ }
+ }
+ }
+
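+ // each rolling restart closes and restarts every node once, starting from a random
+ // offset, then waits for green and re-verifies the doc count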
+ final int ROLLING_RESTARTS = 100;
+ System.out.println("--> starting rolling restarts [" + ROLLING_RESTARTS + "]");
+ for (int rollingRestart = 0; rollingRestart < ROLLING_RESTARTS; rollingRestart++) {
+ System.out.println("--> doing rolling restart [" + rollingRestart + "]...");
+ // pick a random start; note Math.abs(Integer.MIN_VALUE) is negative, so bound the random value instead
+ int nodeId = ThreadLocalRandom.current().nextInt(nodes.length);
+ for (int i = 0; i < nodes.length; i++) {
+ int nodeIdx = (nodeId + i) % nodes.length;
+ nodes[nodeIdx].close();
+ nodes[nodeIdx] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+ System.out.println("--> done rolling restart [" + rollingRestart + "]");
+
+ System.out.println("--> waiting for green state now...");
+ ClusterHealthResponse clusterHealthResponse = client.client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForRelocatingShards(0).setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> timed out waiting for green state...");
+ ClusterState state = client.client().admin().cluster().prepareState().execute().actionGet().getState();
+ System.out.println(state.nodes().prettyPrint());
+ System.out.println(state.routingTable().prettyPrint());
+ System.out.println(state.routingNodes().prettyPrint());
+ throw new ElasticsearchException("timed out waiting for green state");
+ } else {
+ System.out.println("--> got green status");
+ }
+
+ System.out.println("--> checking data [" + rollingRestart + "]....");
+ boolean failed = false;
+ for (int i = 0; i < 10; i++) {
+ long count = client.client().prepareCount().execute().actionGet().getCount();
+ if (COUNT != count) {
+ failed = true;
+ System.err.println("--> ERROR the indexed docs do not match the count..., got [" + count + "], expected [" + COUNT + "]");
+ }
+ }
+ if (!failed) {
+ System.out.println("--> count verified");
+ }
+ }
+
+ System.out.println("--> shutting down...");
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/rollingrestart/RollingRestartStressTest.java b/src/test/java/org/elasticsearch/stresstest/rollingrestart/RollingRestartStressTest.java
new file mode 100644
index 0000000..12aaa5a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/rollingrestart/RollingRestartStressTest.java
@@ -0,0 +1,367 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.rollingrestart;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.status.IndexShardStatus;
+import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
+import org.elasticsearch.action.admin.indices.status.ShardStatus;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.node.internal.InternalNode;
+import org.elasticsearch.search.SearchHit;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+/**
+ *
+ */
+public class RollingRestartStressTest {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ private int numberOfShards = 5;
+ private int numberOfReplicas = 1;
+ private int numberOfNodes = 4;
+
+ private int textTokens = 150;
+ private int numberOfFields = 10;
+ private long initialNumberOfDocs = 100000;
+
+ private int indexers = 0;
+
+ private TimeValue indexerThrottle = TimeValue.timeValueMillis(100);
+
+ private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+ private TimeValue period = TimeValue.timeValueMinutes(20);
+
+ private boolean clearNodeData = true;
+
+ private Node client;
+
+ private AtomicLong indexCounter = new AtomicLong();
+ private AtomicLong idCounter = new AtomicLong();
+
+
+ public RollingRestartStressTest numberOfNodes(int numberOfNodes) {
+ this.numberOfNodes = numberOfNodes;
+ return this;
+ }
+
+ public RollingRestartStressTest numberOfShards(int numberOfShards) {
+ this.numberOfShards = numberOfShards;
+ return this;
+ }
+
+ public RollingRestartStressTest numberOfReplicas(int numberOfReplicas) {
+ this.numberOfReplicas = numberOfReplicas;
+ return this;
+ }
+
+ public RollingRestartStressTest initialNumberOfDocs(long initialNumberOfDocs) {
+ this.initialNumberOfDocs = initialNumberOfDocs;
+ return this;
+ }
+
+ public RollingRestartStressTest textTokens(int textTokens) {
+ this.textTokens = textTokens;
+ return this;
+ }
+
+ public RollingRestartStressTest numberOfFields(int numberOfFields) {
+ this.numberOfFields = numberOfFields;
+ return this;
+ }
+
+ public RollingRestartStressTest indexers(int indexers) {
+ this.indexers = indexers;
+ return this;
+ }
+
+ public RollingRestartStressTest indexerThrottle(TimeValue indexerThrottle) {
+ this.indexerThrottle = indexerThrottle;
+ return this;
+ }
+
+ public RollingRestartStressTest period(TimeValue period) {
+ this.period = period;
+ return this;
+ }
+
+ public RollingRestartStressTest cleanNodeData(boolean clearNodeData) {
+ this.clearNodeData = clearNodeData;
+ return this;
+ }
+
+ public RollingRestartStressTest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ public void run() throws Exception {
+ Random random = new Random(0);
+
+ Node[] nodes = new Node[numberOfNodes];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+ client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();
+
+ client.client().admin().indices().prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.number_of_shards", numberOfShards)
+ .put("index.number_of_replicas", numberOfReplicas)
+ ).execute().actionGet();
+
+ logger.info("********** [START] INDEXING INITIAL DOCS");
+ for (long i = 0; i < initialNumberOfDocs; i++) {
+ indexDoc(random);
+ }
+ logger.info("********** [DONE ] INDEXING INITIAL DOCS");
+
+ Indexer[] indexerThreads = new Indexer[indexers];
+ for (int i = 0; i < indexerThreads.length; i++) {
+ indexerThreads[i] = new Indexer();
+ }
+ for (int i = 0; i < indexerThreads.length; i++) {
+ indexerThreads[i].start();
+ }
+
+ long testStart = System.currentTimeMillis();
+
+ // start doing the rolling restart
+ int nodeIndex = 0;
+ while (true) {
+ File[] nodeData = ((InternalNode) nodes[nodeIndex]).injector().getInstance(NodeEnvironment.class).nodeDataLocations();
+ nodes[nodeIndex].close();
+ if (clearNodeData) {
+ FileSystemUtils.deleteRecursively(nodeData);
+ }
+
+ try {
+ ClusterHealthResponse clusterHealth = client.client().admin().cluster().prepareHealth()
+ .setWaitForGreenStatus()
+ .setWaitForNodes(Integer.toString(numberOfNodes /* (numberOfNodes - 1) data nodes plus this client node */))
+ .setWaitForRelocatingShards(0)
+ .setTimeout("10m").execute().actionGet();
+ if (clusterHealth.isTimedOut()) {
+ logger.warn("timed out waiting for green status....");
+ }
+ } catch (Exception e) {
+ logger.warn("failed to execute cluster health....");
+ }
+
+ nodes[nodeIndex] = NodeBuilder.nodeBuilder().settings(settings).node();
+
+ Thread.sleep(1000);
+
+ try {
+ ClusterHealthResponse clusterHealth = client.client().admin().cluster().prepareHealth()
+ .setWaitForGreenStatus()
+ .setWaitForNodes(Integer.toString(numberOfNodes + 1 /* client node*/))
+ .setWaitForRelocatingShards(0)
+ .setTimeout("10m").execute().actionGet();
+ if (clusterHealth.isTimedOut()) {
+ logger.warn("timed out waiting for green status....");
+ }
+ } catch (Exception e) {
+ logger.warn("failed to execute cluster health....");
+ }
+
+ if (++nodeIndex == nodes.length) {
+ nodeIndex = 0;
+ }
+
+ if ((System.currentTimeMillis() - testStart) > period.millis()) {
+ logger.info("test finished");
+ break;
+ }
+ }
+
+ for (int i = 0; i < indexerThreads.length; i++) {
+ indexerThreads[i].close = true;
+ }
+
+ Thread.sleep(indexerThrottle.millis() + 10000);
+
+ for (int i = 0; i < indexerThreads.length; i++) {
+ if (!indexerThreads[i].closed) {
+ logger.warn("thread not closed!");
+ }
+ }
+
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // check the status
+ IndicesStatusResponse status = client.client().admin().indices().prepareStatus("test").execute().actionGet();
+ for (IndexShardStatus shardStatus : status.getIndex("test")) {
+ ShardStatus shard = shardStatus.getShards()[0];
+ logger.info("shard [{}], docs [{}]", shard.getShardId(), shard.getDocs().getNumDocs());
+ for (ShardStatus replica : shardStatus) {
+ if (shard.getDocs().getNumDocs() != replica.getDocs().getNumDocs()) {
+ logger.warn("shard doc counts do not match, got {} and {}", shard.getDocs().getNumDocs(), replica.getDocs().getNumDocs());
+ }
+ }
+ }
+
+ // check the count
+ for (int i = 0; i < (nodes.length * 5); i++) {
+ CountResponse count = client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
+ logger.info("indexed [{}], count [{}], [{}]", count.getCount(), indexCounter.get(), count.getCount() == indexCounter.get() ? "OK" : "FAIL");
+ if (count.getCount() != indexCounter.get()) {
+ logger.warn("count does not match!");
+ }
+ }
+
+ // scan all the docs, verify all have the same version based on the number of replicas
+ SearchResponse searchResponse = client.client().prepareSearch()
+ .setSearchType(SearchType.SCAN)
+ .setQuery(matchAllQuery())
+ .setSize(50)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+ logger.info("Verifying versions for {} hits...", searchResponse.getHits().totalHits());
+
+ while (true) {
+ searchResponse = client.client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ for (SearchHit hit : searchResponse.getHits()) {
+ long version = -1;
+ for (int i = 0; i < (numberOfReplicas + 1); i++) {
+ GetResponse getResponse = client.client().prepareGet(hit.index(), hit.type(), hit.id()).execute().actionGet();
+ if (version == -1) {
+ version = getResponse.getVersion();
+ } else {
+ if (version != getResponse.getVersion()) {
+ logger.warn("Doc {} has different version numbers {} and {}", hit.id(), version, getResponse.getVersion());
+ }
+ }
+ }
+ }
+ if (searchResponse.getHits().hits().length == 0) {
+ break;
+ }
+ }
+ logger.info("Done verifying versions");
+
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private class Indexer extends Thread {
+
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ Random random = new Random(0);
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ indexDoc(random);
+ Thread.sleep(indexerThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to index / sleep", e);
+ }
+ }
+ }
+ }
+
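+ // builds a random document: a random number of numeric and text fields; ids come
+ // from a shared counter, so setCreate(true) below should never hit an id conflict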
+ private void indexDoc(Random random) throws Exception {
+ StringBuilder sb = new StringBuilder();
+ XContentBuilder json = XContentFactory.jsonBuilder().startObject()
+ .field("field", "value" + ThreadLocalRandom.current().nextInt());
+
+ int fields = ThreadLocalRandom.current().nextInt(numberOfFields);
+ for (int i = 0; i < fields; i++) {
+ json.field("num_" + i, ThreadLocalRandom.current().nextDouble());
+ int tokens = ThreadLocalRandom.current().nextInt(textTokens);
+ sb.setLength(0);
+ for (int j = 0; j < tokens; j++) {
+ sb.append(Strings.randomBase64UUID(random)).append(' ');
+ }
+ json.field("text_" + i, sb.toString());
+ }
+
+ json.endObject();
+
+ String id = Long.toString(idCounter.incrementAndGet());
+ client.client().prepareIndex("test", "type1", id)
+ .setCreate(true)
+ .setSource(json)
+ .execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
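+ // check_on_startup verifies each shard's Lucene index when it is opened, catching
+ // corruption introduced by the restarts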
+ Settings settings = settingsBuilder()
+ .put("index.shard.check_on_startup", true)
+ .put("gateway.type", "none")
+ .put("path.data", "data/data1,data/data2")
+ .build();
+
+ RollingRestartStressTest test = new RollingRestartStressTest()
+ .settings(settings)
+ .numberOfNodes(4)
+ .numberOfShards(5)
+ .numberOfReplicas(1)
+ .initialNumberOfDocs(1000)
+ .textTokens(150)
+ .numberOfFields(10)
+ .cleanNodeData(false)
+ .indexers(5)
+ .indexerThrottle(TimeValue.timeValueMillis(50))
+ .period(TimeValue.timeValueMinutes(3));
+
+ test.run();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/search1/ConcurrentSearchSerializationTests.java b/src/test/java/org/elasticsearch/stresstest/search1/ConcurrentSearchSerializationTests.java
new file mode 100644
index 0000000..772db8f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/search1/ConcurrentSearchSerializationTests.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.search1;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.junit.Ignore;
+
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * Tests that data doesn't get corrupted while being read over the streams.
+ * <p/>
+ * See: https://github.com/elasticsearch/elasticsearch/issues/1686.
+ */
+@Ignore("Stress Test")
+public class ConcurrentSearchSerializationTests {
+
+ public static void main(String[] args) throws Exception {
+
+ Settings settings = ImmutableSettings.settingsBuilder().put("gateway.type", "none").build();
+
+ Node node1 = NodeBuilder.nodeBuilder().settings(settings).node();
+ Node node2 = NodeBuilder.nodeBuilder().settings(settings).node();
+ Node node3 = NodeBuilder.nodeBuilder().settings(settings).node();
+
+ final Client client = node1.client();
+
+ System.out.println("Indexing...");
+ final String data = RandomStrings.randomAsciiOfLength(ThreadLocalRandom.current(), 100);
+ final CountDownLatch latch1 = new CountDownLatch(100);
+ for (int i = 0; i < 100; i++) {
+ client.prepareIndex("test", "type", Integer.toString(i))
+ .setSource("field", data)
+ .execute(new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse indexResponse) {
+ latch1.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ latch1.countDown();
+ }
+ });
+ }
+ latch1.await();
+ System.out.println("Indexed");
+
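+ // ten threads each run 1000 searches of varying sizes and compare every hit's
+ // source against the indexed data to catch corruption on the transport layer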
+ System.out.println("searching...");
+ Thread[] threads = new Thread[10];
+ final CountDownLatch latch = new CountDownLatch(threads.length);
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int i = 0; i < 1000; i++) {
+ SearchResponse searchResponse = client.prepareSearch("test")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setSize(i % 100)
+ .execute().actionGet();
+ for (SearchHit hit : searchResponse.getHits()) {
+ try {
+ if (!hit.sourceAsMap().get("field").equals(data)) {
+ System.err.println("Field not equal!");
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ latch.countDown();
+ }
+ });
+ }
+ for (Thread thread : threads) {
+ thread.start();
+ }
+
+ latch.await();
+
+ System.out.println("done searching");
+ client.close();
+ node1.close();
+ node2.close();
+ node3.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/search1/ParentChildStressTest.java b/src/test/java/org/elasticsearch/stresstest/search1/ParentChildStressTest.java
new file mode 100644
index 0000000..f9a9cd5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/search1/ParentChildStressTest.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.search1;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.transport.RemoteTransportException;
+
+import java.io.IOException;
+import java.util.*;
+
+
+public class ParentChildStressTest {
+
+ private Node elasticNode;
+ private Client client;
+
+ private static final String PARENT_TYPE_NAME = "content";
+ private static final String CHILD_TYPE_NAME = "contentFiles";
+ private static final String INDEX_NAME = "acme";
+
+ /**
+ * Constructor. Initialize elastic and create the index/mapping
+ */
+ public ParentChildStressTest() {
+ NodeBuilder nodeBuilder = NodeBuilder.nodeBuilder();
+ Settings settings = nodeBuilder.settings()
+ .build();
+ this.elasticNode = nodeBuilder.settings(settings).client(true).node();
+ this.client = this.elasticNode.client();
+
+ String mapping =
+ "{\"contentFiles\": {" +
+ "\"_parent\": {" +
+ "\"type\" : \"content\"" +
+ "}}}";
+
+ try {
+ client.admin().indices().create(new CreateIndexRequest(INDEX_NAME).mapping(CHILD_TYPE_NAME, mapping)).actionGet();
+ } catch (RemoteTransportException e) {
+ // usually means the index is already created.
+ }
+ }
+
+ public void shutdown() {
+ client.close();
+ elasticNode.close();
+ }
+
+ /**
+ * Deletes the item from both the parent and child type locations.
+ */
+ public void deleteById(String id) {
+ client.prepareDelete(INDEX_NAME, PARENT_TYPE_NAME, id).execute().actionGet();
+ client.prepareDelete(INDEX_NAME, CHILD_TYPE_NAME, id).execute().actionGet();
+ }
+
+ /**
+ * Index a parent doc
+ */
+ public void indexParent(String id, Map<String, Object> objectMap) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+
+ // index content
+ client.prepareIndex(INDEX_NAME, PARENT_TYPE_NAME, id).setSource(builder.map(objectMap)).execute().actionGet();
+ }
+
+ /**
+ * Index the file as a child doc
+ */
+ public void indexChild(String id, Map<String, Object> objectMap) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+
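+ // the child reuses the parent's id: setParent routes it to the parent's shard and
+ // lets deleteById clean up both documents with the same id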
+ IndexRequestBuilder indexRequestBuilder = client.prepareIndex(INDEX_NAME, CHILD_TYPE_NAME, id)
+ .setParent(id)
+ .setSource(builder.map(objectMap));
+ indexRequestBuilder.execute().actionGet();
+ }
+
+ /**
+ * Execute a search based on a JSON String in QueryDSL format.
+ * <p/>
+ * Throws a RuntimeException if there are any shard failures to
+ * elevate the visibility of the problem.
+ */
+ public List<String> executeSearch(String source) {
+ SearchRequest request = Requests.searchRequest(INDEX_NAME).source(source);
+
+ List<ShardSearchFailure> failures;
+ SearchResponse response;
+
+ response = client.search(request).actionGet();
+ failures = Arrays.asList(response.getShardFailures());
+
+ // throw an exception so that we see the shard failures
+ if (failures.size() != 0) {
+ String failuresStr = failures.toString();
+ if (!failuresStr.contains("reason [No active shards]")) {
+ throw new RuntimeException(failures.toString());
+ }
+ }
+
+ ArrayList<String> results = new ArrayList<String>();
+ if (response != null) {
+ for (SearchHit hit : response.getHits()) {
+ String sourceStr = hit.sourceAsString();
+ results.add(sourceStr);
+ }
+ }
+ return results;
+ }
+
+ /**
+ * Create a document as a parent and index it.
+ * Load a file and index it as a child.
+ */
+ public String indexDoc() throws IOException {
+ String id = UUID.randomUUID().toString();
+
+ Map<String, Object> objectMap = new HashMap<String, Object>();
+ objectMap.put("title", "this is a document");
+
+ Map<String, Object> objectMap2 = new HashMap<String, Object>();
+ objectMap2.put("description", "child test");
+
+ this.indexParent(id, objectMap);
+ this.indexChild(id, objectMap2);
+ return id;
+ }
+
+ /**
+ * Perform the has_child query for the doc.
+ * <p/>
+ * Since it might take time to get indexed, it
+ * loops until it finds the doc.
+ */
+ public void searchDocByChild() throws InterruptedException {
+ String dslString =
+ "{\"query\":{" +
+ "\"has_child\":{" +
+ "\"query\":{" +
+ "\"field\":{" +
+ "\"description\":\"child test\"}}," +
+ "\"type\":\"contentFiles\"}}}";
+
+ int numTries = 0;
+ List<String> items = new ArrayList<String>();
+
+ while (items.size() != 1 && numTries < 20) {
+ items = executeSearch(dslString);
+
+ numTries++;
+ if (items.size() != 1) {
+ Thread.sleep(250);
+ }
+ }
+ if (items.size() != 1) {
+ System.out.println("Exceeded number of retries");
+ System.exit(1);
+ }
+ }
+
+ /**
+ * Program to loop on:
+ * create parent/child doc
+ * search for the doc
+ * delete the doc
+ * repeat the above until shard failure.
+ * <p/>
+ * Eventually fails with:
+ * <p/>
+ * [shard [[74wz0lrXRSmSOsJOqgPvlw][acme][1]], reason [RemoteTransportException
+ * [[Kismet][inet[/10.10.30.52:9300]][search/phase/query]]; nested:
+ * QueryPhaseExecutionException[[acme][1]:
+ * query[ConstantScore(child_filter[contentFiles
+ * /content](filtered(file:mission
+ * file:statement)->FilterCacheFilterWrapper(
+ * _type:contentFiles)))],from[0],size[10]: Query Failed [Failed to execute
+ * child query [filtered(file:mission
+ * file:statement)->FilterCacheFilterWrapper(_type:contentFiles)]]]; nested:
+ * ]]
+ *
+ * @param args
+ */
+ public static void main(String[] args) {
+ ParentChildStressTest elasticTest = new ParentChildStressTest();
+ try {
+ // loop a bunch of times - usually fails before the count is done.
+ int NUM_LOOPS = 1000;
+ System.out.println();
+ System.out.println("Looping [" + NUM_LOOPS + "] times:");
+ System.out.println();
+ for (int i = 0; i < NUM_LOOPS; i++) {
+ String id = elasticTest.indexDoc();
+
+ elasticTest.searchDocByChild();
+
+ elasticTest.deleteById(id);
+
+ System.out.println(" Success: " + i);
+ }
+ elasticTest.shutdown();
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ elasticTest.shutdown();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java b/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java
new file mode 100644
index 0000000..a1751ae
--- /dev/null
+++ b/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java
@@ -0,0 +1,427 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.search1;
+
+import jsr166y.ThreadLocalRandom;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.SortOrder;
+import org.junit.Ignore;
+
+import java.util.Arrays;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+
+/**
+ * Stress test that concurrently indexes, searches, flushes, and runs delete-by-query
+ * against a randomized set of indices and types until the configured period elapses.
+ */
+@Ignore("Stress Test")
+public class Search1StressTest {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+
+ private int numberOfNodes = 4;
+
+ private int indexers = 0;
+ private SizeValue preIndexDocs = new SizeValue(0);
+ private TimeValue indexerThrottle = TimeValue.timeValueMillis(100);
+ private int searchers = 0;
+ private TimeValue searcherThrottle = TimeValue.timeValueMillis(20);
+ private int numberOfIndices = 10;
+ private int numberOfTypes = 4;
+ private int numberOfValues = 20;
+ private int numberOfHits = 300;
+ private TimeValue flusherThrottle = TimeValue.timeValueMillis(1000);
+ private TimeValue deleteByQueryThrottle = TimeValue.timeValueMillis(5000);
+
+ private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
+
+ private TimeValue period = TimeValue.timeValueMinutes(20);
+
+ private AtomicLong indexCounter = new AtomicLong();
+ private AtomicLong searchCounter = new AtomicLong();
+
+
+ private Node client;
+
+ public Search1StressTest setNumberOfNodes(int numberOfNodes) {
+ this.numberOfNodes = numberOfNodes;
+ return this;
+ }
+
+ public Search1StressTest setPreIndexDocs(SizeValue preIndexDocs) {
+ this.preIndexDocs = preIndexDocs;
+ return this;
+ }
+
+ public Search1StressTest setIndexers(int indexers) {
+ this.indexers = indexers;
+ return this;
+ }
+
+ public Search1StressTest setIndexerThrottle(TimeValue indexerThrottle) {
+ this.indexerThrottle = indexerThrottle;
+ return this;
+ }
+
+ public Search1StressTest setSearchers(int searchers) {
+ this.searchers = searchers;
+ return this;
+ }
+
+ public Search1StressTest setSearcherThrottle(TimeValue searcherThrottle) {
+ this.searcherThrottle = searcherThrottle;
+ return this;
+ }
+
+ public Search1StressTest setNumberOfIndices(int numberOfIndices) {
+ this.numberOfIndices = numberOfIndices;
+ return this;
+ }
+
+ public Search1StressTest setNumberOfTypes(int numberOfTypes) {
+ this.numberOfTypes = numberOfTypes;
+ return this;
+ }
+
+ public Search1StressTest setNumberOfValues(int numberOfValues) {
+ this.numberOfValues = numberOfValues;
+ return this;
+ }
+
+ public Search1StressTest setNumberOfHits(int numberOfHits) {
+ this.numberOfHits = numberOfHits;
+ return this;
+ }
+
+ public Search1StressTest setFlusherThrottle(TimeValue flusherThrottle) {
+ this.flusherThrottle = flusherThrottle;
+ return this;
+ }
+
+ public Search1StressTest setDeleteByQueryThrottle(TimeValue deleteByQueryThrottle) {
+ this.deleteByQueryThrottle = deleteByQueryThrottle;
+ return this;
+ }
+
+ public Search1StressTest setSettings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ public Search1StressTest setPeriod(TimeValue period) {
+ this.period = period;
+ return this;
+ }
+
+ private String nextIndex() {
+ // nextInt(bound) is used instead of Math.abs(nextInt()) % bound, which can
+ // go negative when nextInt() yields Integer.MIN_VALUE
+ return "test" + ThreadLocalRandom.current().nextInt(numberOfIndices);
+ }
+
+ private String nextType() {
+ return "type" + ThreadLocalRandom.current().nextInt(numberOfTypes);
+ }
+
+ private int nextNumValue() {
+ return ThreadLocalRandom.current().nextInt(numberOfValues);
+ }
+
+ private String nextFieldValue() {
+ return "value" + ThreadLocalRandom.current().nextInt(numberOfValues);
+ }
+
+ private class Searcher extends Thread {
+
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ String indexName = nextIndex();
+ SearchRequestBuilder builder = client.client().prepareSearch(indexName);
+ if (ThreadLocalRandom.current().nextBoolean()) {
+ builder.addSort("num", SortOrder.DESC);
+ } else if (ThreadLocalRandom.current().nextBoolean()) {
+ // add a _score based sort; it matches the default order so it does no real sorting, just exercises the code path
+ builder.addSort("_score", SortOrder.DESC);
+ }
+ if (ThreadLocalRandom.current().nextBoolean()) {
+ builder.setSearchType(SearchType.DFS_QUERY_THEN_FETCH);
+ }
+ int size = ThreadLocalRandom.current().nextInt(numberOfHits);
+ builder.setSize(size);
+ if (ThreadLocalRandom.current().nextBoolean()) {
+ // update from
+ builder.setFrom(size / 2);
+ }
+ String value = nextFieldValue();
+ builder.setQuery(termQuery("field", value));
+ searchCounter.incrementAndGet();
+ SearchResponse searchResponse = builder.execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("failed search " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ // verify that all come from the requested index
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (!hit.shard().index().equals(indexName)) {
+ logger.warn("got wrong index, asked for [{}], got [{}]", indexName, hit.shard().index());
+ }
+ }
+ // verify that all hits have the relevant value
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (!value.equals(hit.sourceAsMap().get("field"))) {
+ logger.warn("got wrong field, asked for [{}], got [{}]", value, hit.sourceAsMap().get("field"));
+ }
+ }
+ Thread.sleep(searcherThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to search", e);
+ }
+ }
+ }
+ }
+
+ private class Indexer extends Thread {
+
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ indexDoc();
+ Thread.sleep(indexerThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to index / sleep", e);
+ }
+ }
+ }
+ }
+
+ private class Flusher extends Thread {
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ client.client().admin().indices().prepareFlush().execute().actionGet();
+ Thread.sleep(flusherThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to flush / sleep", e);
+ }
+ }
+ }
+ }
+
+ private class DeleteByQuery extends Thread {
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ client.client().prepareDeleteByQuery().setQuery(termQuery("num", nextNumValue())).execute().actionGet();
+ Thread.sleep(deleteByQueryThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to delete_by_query", e);
+ }
+ }
+ }
+ }
+
+ private void indexDoc() throws Exception {
+ XContentBuilder json = XContentFactory.jsonBuilder().startObject()
+ .field("num", nextNumValue())
+ .field("field", nextFieldValue());
+
+ json.endObject();
+
+ client.client().prepareIndex(nextIndex(), nextType())
+ .setSource(json)
+ .execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+
+ public void run() throws Exception {
+ Node[] nodes = new Node[numberOfNodes];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+ client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ client.client().admin().indices().prepareCreate("test" + i).execute().actionGet();
+ }
+
+ logger.info("Pre indexing docs [{}]...", preIndexDocs);
+ for (long i = 0; i < preIndexDocs.singles(); i++) {
+ indexDoc();
+ }
+ logger.info("Done pre indexing docs [{}]", preIndexDocs);
+
+ Indexer[] indexerThreads = new Indexer[indexers];
+ for (int i = 0; i < indexerThreads.length; i++) {
+ indexerThreads[i] = new Indexer();
+ }
+ for (Indexer indexerThread : indexerThreads) {
+ indexerThread.start();
+ }
+
+ Thread.sleep(10000);
+
+ Searcher[] searcherThreads = new Searcher[searchers];
+ for (int i = 0; i < searcherThreads.length; i++) {
+ searcherThreads[i] = new Searcher();
+ }
+ for (Searcher searcherThread : searcherThreads) {
+ searcherThread.start();
+ }
+
+ Flusher flusher = null;
+ if (flusherThrottle.millis() > 0) {
+ flusher = new Flusher();
+ flusher.start();
+ }
+
+ DeleteByQuery deleteByQuery = null;
+ if (deleteByQueryThrottle.millis() > 0) {
+ deleteByQuery = new DeleteByQuery();
+ deleteByQuery.start();
+ }
+
+
+ long testStart = System.currentTimeMillis();
+
+ while (true) {
+ Thread.sleep(5000);
+ if ((System.currentTimeMillis() - testStart) > period.millis()) {
+ break;
+ }
+ }
+
+ System.out.println("DONE, closing .....");
+
+ if (flusher != null) {
+ flusher.close = true;
+ }
+
+ if (deleteByQuery != null) {
+ deleteByQuery.close = true;
+ }
+
+ for (Searcher searcherThread : searcherThreads) {
+ searcherThread.close = true;
+ }
+
+ for (Indexer indexerThread : indexerThreads) {
+ indexerThread.close = true;
+ }
+
+ Thread.sleep(indexerThrottle.millis() + 10000);
+
+ if (flusher != null && !flusher.closed) {
+ logger.warn("flusher not closed!");
+ }
+ if (deleteByQuery != null && !deleteByQuery.closed) {
+ logger.warn("deleteByQuery not closed!");
+ }
+ for (Searcher searcherThread : searcherThreads) {
+ if (!searcherThread.closed) {
+ logger.warn("search thread not closed!");
+ }
+ }
+ for (Indexer indexerThread : indexerThreads) {
+ if (!indexerThread.closed) {
+ logger.warn("index thread not closed!");
+ }
+ }
+
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+
+ System.out.println("********** DONE, indexed [" + indexCounter.get() + "], searched [" + searchCounter.get() + "]");
+ }
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = ImmutableSettings.settingsBuilder()
+ .put("gateway.type", "none")
+ .build();
+
+ Search1StressTest test = new Search1StressTest()
+ .setPeriod(TimeValue.timeValueMinutes(10))
+ .setSettings(settings)
+ .setNumberOfNodes(2)
+ .setPreIndexDocs(SizeValue.parseSizeValue("100"))
+ .setIndexers(2)
+ .setIndexerThrottle(TimeValue.timeValueMillis(100))
+ .setSearchers(10)
+ .setSearcherThrottle(TimeValue.timeValueMillis(10))
+ .setDeleteByQueryThrottle(TimeValue.timeValueMillis(-1))
+ .setFlusherThrottle(TimeValue.timeValueMillis(1000))
+ .setNumberOfIndices(10)
+ .setNumberOfTypes(5)
+ .setNumberOfValues(50)
+ .setNumberOfHits(300);
+
+ test.run();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchAllocationTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchAllocationTestCase.java
new file mode 100644
index 0000000..6e2428b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchAllocationTestCase.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecidersModule;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+import java.lang.reflect.Constructor;
+import java.util.*;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+
+/**
+ * Base class for unit tests that exercise shard allocation and the allocation deciders.
+ */
+public class ElasticsearchAllocationTestCase extends ElasticsearchTestCase {
+
+ public static AllocationService createAllocationService() {
+ return createAllocationService(ImmutableSettings.Builder.EMPTY_SETTINGS);
+ }
+
+ public static AllocationService createAllocationService(Settings settings) {
+ return createAllocationService(settings, getRandom());
+ }
+
+ public static AllocationService createAllocationService(Settings settings, Random random) {
+ return new AllocationService(settings,
+ randomAllocationDeciders(settings, new NodeSettingsService(ImmutableSettings.Builder.EMPTY_SETTINGS), random),
+ new ShardsAllocators(settings), ClusterInfoService.EMPTY);
+ }
+
+ public static AllocationDeciders randomAllocationDeciders(Settings settings, NodeSettingsService nodeSettingsService, Random random) {
+ final ImmutableSet<Class<? extends AllocationDecider>> defaultAllocationDeciders = AllocationDecidersModule.DEFAULT_ALLOCATION_DECIDERS;
+ final List<AllocationDecider> list = new ArrayList<AllocationDecider>();
+ for (Class<? extends AllocationDecider> deciderClass : defaultAllocationDeciders) {
+ try {
+ try {
+ Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class, NodeSettingsService.class);
+ list.add(constructor.newInstance(settings, nodeSettingsService));
+ } catch (NoSuchMethodException e) {
+ // fall back to the single-argument constructor
+ Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class);
+ list.add(constructor.newInstance(settings));
+ }
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ assertThat(list.size(), equalTo(defaultAllocationDeciders.size()));
+ for (AllocationDecider d : list) {
+ assertThat(defaultAllocationDeciders.contains(d.getClass()), is(true));
+ }
+ Collections.shuffle(list, random);
+ return new AllocationDeciders(settings, list.toArray(new AllocationDecider[0]));
+
+ }
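+ // Illustrative flow for an allocation test built on these helpers (the cluster
+ // state construction is elided and the variable names are hypothetical):
+ //
+ //   AllocationService strategy = createAllocationService();
+ //   ClusterState state = ...; // a state containing INITIALIZING shards
+ //   state = startRandomInitializingShard(state, strategy);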
+
+ public static DiscoveryNode newNode(String nodeId) {
+ return new DiscoveryNode(nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT);
+ }
+
+ public static DiscoveryNode newNode(String nodeId, TransportAddress address) {
+ return new DiscoveryNode(nodeId, address, Version.CURRENT);
+ }
+
+ public static DiscoveryNode newNode(String nodeId, Map<String, String> attributes) {
+ return new DiscoveryNode("", nodeId, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT);
+ }
+
+ public static DiscoveryNode newNode(String nodeId, Version version) {
+ return new DiscoveryNode(nodeId, DummyTransportAddress.INSTANCE, version);
+ }
+
+ public static ClusterState startRandomInitializingShard(ClusterState clusterState, AllocationService strategy) {
+ List<MutableShardRouting> initializingShards = clusterState.routingNodes().shardsWithState(INITIALIZING);
+ if (initializingShards.isEmpty()) {
+ return clusterState;
+ }
+ RoutingTable routingTable = strategy.applyStartedShards(clusterState, newArrayList(initializingShards.get(randomInt(initializingShards.size() - 1)))).routingTable();
+ return ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java
new file mode 100644
index 0000000..4be71bb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java
@@ -0,0 +1,878 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import com.google.common.base.Joiner;
+import org.apache.lucene.util.AbstractRandomizedTest;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.client.RandomizingClient;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+
+import java.io.IOException;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.util.*;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.test.TestCluster.clusterName;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.emptyIterable;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * {@link ElasticsearchIntegrationTest} is an abstract base class to run integration
+ * tests against a JVM private Elasticsearch Cluster. The test class supports 3 different
+ * cluster scopes.
+ * <ul>
+ * <li>{@link Scope#GLOBAL} - uses a cluster shared across test suites. This cluster doesn't allow any modifications to
+ * the cluster settings and will fail if any persistent cluster settings are applied during tear down.</li>
+ * <li>{@link Scope#TEST} - uses a new cluster for each individual test method.</li>
+ * <li>{@link Scope#SUITE} - uses a cluster shared across all test methods in the same suite</li>
+ * </ul>
+ * <p/>
+ * The most common test scope is {@link Scope#GLOBAL}, which shares a cluster per JVM. This cluster is only set up once
+ * and can be used as long as the tests work on a per-index basis without changing any cluster-wide settings or requiring
+ * any specific node configuration. This is the best performing option since it sets up the cluster only once.
+ * <p/>
+ * If the tests need specific node settings or change persistent and/or transient cluster settings either {@link Scope#TEST}
+ * or {@link Scope#SUITE} should be used. To configure a scope for the test cluster the {@link ClusterScope} annotation
+ * should be used, here is an example:
+ * <pre>
+ *
+ * @ClusterScope(scope=Scope.TEST) public class SomeIntegrationTest extends ElasticsearchIntegrationTest {
+ * @Test
+ * public void testMethod() {}
+ * }
+ * </pre>
+ * <p/>
+ * If no {@link ClusterScope} annotation is present on an integration test the default scope is {@link Scope#GLOBAL}.
+ * <p/>
+ * A test cluster creates a set of nodes in the background before the test starts. The number of nodes in the cluster is
+ * determined at random and can change across tests. The minimum number of nodes in the shared global cluster is <code>2</code>.
+ * For other scopes the {@link ClusterScope} allows configuring the initial number of nodes that are created before
+ * the tests start.
+ * <p/>
+ * <pre>
+ * @ClusterScope(scope=Scope.SUITE, numNodes=3)
+ * public class SomeIntegrationTest extends ElasticsearchIntegrationTest {
+ * @Test
+ * public void testMethod() {}
+ * }
+ * </pre>
+ * <p/>
+ * Note, the {@link ElasticsearchIntegrationTest} uses randomized settings on a cluster and index level. For instance,
+ * each test might use a different directory implementation, or be handed a random client to one of the
+ * nodes in the cluster for each call to {@link #client()}. Test failures might only be reproducible if the correct
+ * system properties are passed to the test execution environment.
+ * <p/>
+ * <p>
+ * This class supports the following system properties (passed with -Dkey=value to the application)
+ * <ul>
+ * <li>-D{@value #TESTS_CLIENT_RATIO} - a double value in the interval [0..1] which defines the ratio between node and transport clients used</li>
+ * <li>-D{@value TestCluster#TESTS_ENABLE_MOCK_MODULES} - a boolean value to enable or disable mock modules. This is
+ * useful for testing the system without the asserting mock modules, making sure they don't hide any bugs that would occur in production.</li>
+ * <li>-D{@value org.elasticsearch.test.TestCluster#SETTING_INDEX_SEED} - a random seed used to initialize the index random context.</li>
+ * </ul>
+ * </p>
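+ * <p>
+ * For example (illustrative only, not from the original documentation), the client ratio
+ * could be pinned on the command line, e.g. when running the suite with Maven:
+ * <pre>
+ * mvn test -Dtests.client.ratio=0.5
+ * </pre>
+ * </p>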
+ */
+@Ignore
+@AbstractRandomizedTest.IntegrationTests
+public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase {
+ private static TestCluster GLOBAL_CLUSTER;
+
+ /**
+ * Key used to set the transport client ratio via the commandline -D{@value #TESTS_CLIENT_RATIO}
+ */
+ public static final String TESTS_CLIENT_RATIO = "tests.client.ratio";
+
+ /**
+ * The current cluster depending on the configured {@link Scope}.
+ * By default if no {@link ClusterScope} is configured this will hold a reference to the global cluster carried
+ * on across test suites.
+ */
+ private static TestCluster currentCluster;
+
+ private static final double TRANSPORT_CLIENT_RATIO = transportClientRatio();
+
+ private static final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<Class<?>, TestCluster>();
+
+ @BeforeClass
+ public static void beforeClass() throws Exception {
+ // Initialize lazily. No need for volatiles/CASes since each JVM runs at most one test
+ // suite at any given moment.
+ if (GLOBAL_CLUSTER == null) {
+ long masterSeed = SeedUtils.parseSeed(RandomizedContext.current().getRunnerSeedAsString());
+ GLOBAL_CLUSTER = new TestCluster(masterSeed, clusterName("shared", ElasticsearchTestCase.CHILD_VM_ID, masterSeed));
+ }
+ }
+
+ @Before
+ public final void before() throws IOException {
+ assert Thread.getDefaultUncaughtExceptionHandler() instanceof ElasticsearchUncaughtExceptionHandler;
+ try {
+ final Scope currentClusterScope = getCurrentClusterScope();
+ switch (currentClusterScope) {
+ case GLOBAL:
+ clearClusters();
+ currentCluster = GLOBAL_CLUSTER;
+ break;
+ case SUITE:
+ currentCluster = buildAndPutCluster(currentClusterScope, false);
+ break;
+ case TEST:
+ currentCluster = buildAndPutCluster(currentClusterScope, true);
+ break;
+ default:
+ fail("Unknown Scope: [" + currentClusterScope + "]");
+ }
+ currentCluster.beforeTest(getRandom(), getPerTestTransportClientRatio());
+ cluster().wipe();
+ cluster().randomIndexTemplate();
+ logger.info("[{}#{}]: before test", getTestClass().getSimpleName(), getTestName());
+ } catch (OutOfMemoryError e) {
+ if (e.getMessage().contains("unable to create new native thread")) {
+ ElasticsearchTestCase.printStackDump(logger);
+ }
+ throw e;
+ }
+ }
+
+ public TestCluster buildAndPutCluster(Scope currentClusterScope, boolean createIfExists) throws IOException {
+ TestCluster testCluster = clusters.get(this.getClass());
+ if (createIfExists || testCluster == null) {
+ testCluster = buildTestCluster(currentClusterScope);
+ } else {
+ clusters.remove(this.getClass());
+ }
+ clearClusters();
+ clusters.put(this.getClass(), testCluster);
+ return testCluster;
+ }
+
+ private void clearClusters() throws IOException {
+ if (!clusters.isEmpty()) {
+ for (TestCluster cluster : clusters.values()) {
+ cluster.close();
+ }
+ clusters.clear();
+ }
+ }
+
+ @After
+ public final void after() throws IOException {
+ try {
+ logger.info("[{}#{}]: cleaning up after test", getTestClass().getSimpleName(), getTestName());
+ Scope currentClusterScope = getCurrentClusterScope();
+ if (currentClusterScope == Scope.TEST) {
+ clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST
+ } else {
+ MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().getMetaData();
+ assertThat("test leaves persistent cluster metadata behind: " + metaData.persistentSettings().getAsMap(), metaData
+ .persistentSettings().getAsMap().size(), equalTo(0));
+ assertThat("test leaves transient cluster metadata behind: " + metaData.transientSettings().getAsMap(), metaData
+ .transientSettings().getAsMap().size(), equalTo(0));
+
+ }
+ cluster().wipe(); // wipe after to make sure we fail in the test that didn't ack the delete
+ cluster().assertAfterTest();
+ logger.info("[{}#{}]: cleaned up after test", getTestClass().getSimpleName(), getTestName());
+ } catch (OutOfMemoryError e) {
+ if (e.getMessage().contains("unable to create new native thread")) {
+ ElasticsearchTestCase.printStackDump(logger);
+ }
+ throw e;
+ } finally {
+ currentCluster.afterTest();
+ currentCluster = null;
+ }
+ }
+
+ public static TestCluster cluster() {
+ return currentCluster;
+ }
+
+ public ClusterService clusterService() {
+ return cluster().clusterService();
+ }
+
+ public static Client client() {
+ Client client = cluster().client();
+ if (frequently()) {
+ client = new RandomizingClient((InternalClient) client, getRandom());
+ }
+ return client;
+ }
+
+ public static Iterable<Client> clients() {
+ return cluster();
+ }
+
+ /**
+ * Returns a settings object used in {@link #createIndex(String...)} and {@link #prepareCreate(String)} and friends.
+ * This method can be overridden by subclasses to set defaults for the indices that are created by the test.
+ * By default it returns an empty settings object.
+ */
+ public Settings indexSettings() {
+ return ImmutableSettings.EMPTY;
+ }
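+ // A hedged sketch (not part of the original source): a subclass could override
+ // indexSettings() to pin the index layout for its tests, e.g.:
+ //
+ //   @Override
+ //   public Settings indexSettings() {
+ //       return ImmutableSettings.builder()
+ //               .put("index.number_of_shards", 1)
+ //               .put("index.number_of_replicas", 0)
+ //               .build();
+ //   }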
+
+ /**
+ * Creates one or more indices and asserts that the indices are acknowledged. If one of the indices
+ * already exists this method will fail and wipe all the indices created so far.
+ */
+ public final void createIndex(String... names) {
+
+ List<String> created = new ArrayList<String>();
+ for (String name : names) {
+ boolean success = false;
+ try {
+ assertAcked(prepareCreate(name));
+ created.add(name);
+ success = true;
+ } finally {
+ if (!success && !created.isEmpty()) {
+ cluster().wipeIndices(created.toArray(new String[created.size()]));
+ }
+ }
+ }
+ }
+
+ /**
+ * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
+ */
+ public final CreateIndexRequestBuilder prepareCreate(String index) {
+ return client().admin().indices().prepareCreate(index).setSettings(indexSettings());
+ }
+
+ /**
+ * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
+ * The index that is created with this builder will only be allowed to allocate on the number of nodes passed to this
+ * method.
+ * <p>
+ * This method uses allocation deciders to filter out certain nodes to allocate the created index on. It defines allocation
+ * rules based on <code>index.routing.allocation.exclude._name</code>.
+ * </p>
+ */
+ public final CreateIndexRequestBuilder prepareCreate(String index, int numNodes) {
+ return prepareCreate(index, numNodes, ImmutableSettings.builder());
+ }
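+ // Hypothetical usage: create "test" but restrict its allocation to two of the nodes:
+ //
+ //   assertAcked(prepareCreate("test", 2));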
+
+ /**
+ * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
+ * The index that is created with this builder will only be allowed to allocate on the number of nodes passed to this
+ * method.
+ * <p>
+ * This method uses allocation deciders to filter out certain nodes to allocate the created index on. It defines allocation
+ * rules based on <code>index.routing.allocation.exclude._name</code>.
+ * </p>
+ */
+ public CreateIndexRequestBuilder prepareCreate(String index, int numNodes, ImmutableSettings.Builder builder) {
+ cluster().ensureAtLeastNumNodes(numNodes);
+ Settings settings = indexSettings();
+ builder.put(settings);
+ if (numNodes > 0) {
+ getExcludeSettings(index, numNodes, builder);
+ }
+ return client().admin().indices().prepareCreate(index).setSettings(builder.build());
+ }
+
+ private ImmutableSettings.Builder getExcludeSettings(String index, int num, ImmutableSettings.Builder builder) {
+ String exclude = Joiner.on(',').join(cluster().allButN(num));
+ builder.put("index.routing.allocation.exclude._name", exclude);
+ return builder;
+ }
+
+ /**
+ * Restricts the given index to be allocated on <code>n</code> nodes using the allocation deciders.
+ * However, if the shards can't be allocated on any other node, shards for this index will remain allocated on
+ * more than <code>n</code> nodes.
+ */
+ public void allowNodes(String index, int n) {
+ assert index != null;
+ cluster().ensureAtLeastNumNodes(n);
+ ImmutableSettings.Builder builder = ImmutableSettings.builder();
+ if (n > 0) {
+ getExcludeSettings(index, n, builder);
+ }
+ Settings build = builder.build();
+ if (!build.getAsMap().isEmpty()) {
+ client().admin().indices().prepareUpdateSettings(index).setSettings(build).execute().actionGet();
+ }
+ }
+
+ /**
+ * Ensures the cluster has a green state via the cluster health API. This method will also wait for relocations.
+ * It is useful to ensure that all actions on the cluster have finished and all shards that were currently relocating
+ * are now allocated and started.
+ */
+ public ClusterHealthStatus ensureGreen(String... indices) {
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(Requests.clusterHealthRequest(indices).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
+ }
+ assertThat(actionGet.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ return actionGet.getStatus();
+ }
+
+ /**
+ * Waits for all relocating shards to become active using the cluster health API.
+ */
+ public ClusterHealthStatus waitForRelocation() {
+ return waitForRelocation(null);
+ }
+
+ /**
+ * Waits for all relocating shards to become active and the cluster has reached the given health status
+ * using the cluster health API.
+ */
+ public ClusterHealthStatus waitForRelocation(ClusterHealthStatus status) {
+ ClusterHealthRequest request = Requests.clusterHealthRequest().waitForRelocatingShards(0);
+ if (status != null) {
+ request.waitForStatus(status);
+ }
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(request).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("waitForRelocation timed out (status={}), cluster state:\n{}\n{}", status, client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false));
+ }
+ if (status != null) {
+ assertThat(actionGet.getStatus(), equalTo(status));
+ }
+ return actionGet.getStatus();
+ }
+
+ /**
+ * Sets the cluster's minimum master nodes setting and makes sure the response is acknowledged.
+ * Note: this doesn't guarantee the new setting is in effect, just that it has been received by all nodes.
+ */
+ public void setMinimumMasterNodes(int n) {
+ assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
+ settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n))
+ .get().isAcknowledged());
+ }
+
+ /**
+ * Ensures the cluster has a yellow state via the cluster health API.
+ */
+ public ClusterHealthStatus ensureYellow(String... indices) {
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(Requests.clusterHealthRequest(indices).waitForRelocatingShards(0).waitForYellowStatus().waitForEvents(Priority.LANGUID)).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("ensureYellow timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for yellow", actionGet.isTimedOut(), equalTo(false));
+ }
+ return actionGet.getStatus();
+ }
+
+
+ /**
+ * Ensures the cluster is in a searchable state for the given indices. This means a searchable copy of each
+ * shard is available on the cluster.
+ */
+ protected ClusterHealthStatus ensureSearchable(String... indices) {
+ // this is just a temporary thing but it's easier to change if it is encapsulated.
+ return ensureGreen(indices);
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * client().prepareIndex(index, type).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, XContentBuilder source) {
+ return client().prepareIndex(index, type).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * client().prepareIndex(index, type).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, String id, Map<String, Object> source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * client().prepareGet(index, type, id).execute().actionGet();
+ * </pre>
+ */
+ protected final GetResponse get(String index, String type, String id) {
+ return client().prepareGet(index, type, id).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, String id, XContentBuilder source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, String id, Object... source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Waits for relocations and refreshes all indices in the cluster.
+ *
+ * @see #waitForRelocation()
+ */
+ protected final RefreshResponse refresh() {
+ waitForRelocation();
+ // TODO RANDOMIZE with flush?
+ RefreshResponse actionGet = client().admin().indices().prepareRefresh().execute().actionGet();
+ assertNoFailures(actionGet);
+ return actionGet;
+ }
+
+ /**
+ * Flushes and refreshes all indices in the cluster
+ */
+ protected final void flushAndRefresh() {
+ flush(true);
+ refresh();
+ }
+
+ /**
+ * Flushes all indices in the cluster
+ */
+ protected final FlushResponse flush() {
+ return flush(true);
+ }
+
+ private FlushResponse flush(boolean ignoreNotAllowed) {
+ waitForRelocation();
+ FlushResponse actionGet = client().admin().indices().prepareFlush().execute().actionGet();
+ if (ignoreNotAllowed) {
+ for (ShardOperationFailedException failure : actionGet.getShardFailures()) {
+ assertThat("unexpected flush failure " + failure.reason(), failure.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+ } else {
+ assertNoFailures(actionGet);
+ }
+ return actionGet;
+ }
+
+ /**
+ * Waits for all relocations and optimizes all indices in the cluster to 1 segment.
+ */
+ protected OptimizeResponse optimize() {
+ waitForRelocation();
+ OptimizeResponse actionGet = client().admin().indices().prepareOptimize().execute().actionGet();
+ assertNoFailures(actionGet);
+ return actionGet;
+ }
+
+ /**
+ * Returns <code>true</code> iff the given index exists, otherwise <code>false</code>.
+ */
+ protected boolean indexExists(String index) {
+ IndicesExistsResponse actionGet = client().admin().indices().prepareExists(index).execute().actionGet();
+ return actionGet.isExists();
+ }
+
+ /**
+ * Returns a random admin client. This client can either be a node or a transport client pointing to any of
+ * the nodes in the cluster.
+ */
+ protected AdminClient admin() {
+ return client().admin();
+ }
+
+ /**
+ * Convenience method that forwards to {@link #indexRandom(boolean, List)}.
+ */
+ public void indexRandom(boolean forceRefresh, IndexRequestBuilder... builders) throws InterruptedException, ExecutionException {
+ indexRandom(forceRefresh, Arrays.asList(builders));
+ }
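+ // Hypothetical usage from a test (index, type, ids and field values are illustrative):
+ //
+ //   indexRandom(true,
+ //           client().prepareIndex("test", "type", "1").setSource("field", "value1"),
+ //           client().prepareIndex("test", "type", "2").setSource("field", "value2"));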
+
+ /**
+ * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and
+ * indexes them in either a blocking or an async fashion. This is very useful to catch problems that relate to internal document
+ * ids or index segment creations. Some features might have bugs when a given document is the first or the last in a
+ * segment, or when only one document is in a segment, etc. This method prevents issues like this by randomizing the index
+ * layout.
+ */
+ public void indexRandom(boolean forceRefresh, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
+ if (builders.size() == 0) {
+ return;
+ }
+
+ Random random = getRandom();
+ Set<String> indicesSet = new HashSet<String>();
+ for (IndexRequestBuilder builder : builders) {
+ indicesSet.add(builder.request().index());
+ }
+ final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
+ Collections.shuffle(builders, random);
+ final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Throwable>> errors = new CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Throwable>>();
+ List<CountDownLatch> latches = new ArrayList<CountDownLatch>();
+ if (frequently()) {
+ logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false);
+ final CountDownLatch latch = new CountDownLatch(builders.size());
+ latches.add(latch);
+ for (IndexRequestBuilder indexRequestBuilder : builders) {
+ indexRequestBuilder.execute(new PayloadLatchedActionListener<IndexResponse, IndexRequestBuilder>(indexRequestBuilder, latch, errors));
+ if (rarely()) {
+ if (rarely()) {
+ client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenient()).execute(new LatchedActionListener<RefreshResponse>(newLatch(latches)));
+ } else if (rarely()) {
+ client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenient()).execute(new LatchedActionListener<FlushResponse>(newLatch(latches)));
+ } else if (rarely()) {
+ client().admin().indices().prepareOptimize(indices).setIndicesOptions(IndicesOptions.lenient()).setMaxNumSegments(between(1, 10)).setFlush(random.nextBoolean()).execute(new LatchedActionListener<OptimizeResponse>(newLatch(latches)));
+ }
+ }
+ }
+
+ } else if (randomBoolean()) {
+ logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, false);
+ for (IndexRequestBuilder indexRequestBuilder : builders) {
+ indexRequestBuilder.execute().actionGet();
+ if (rarely()) {
+ if (rarely()) {
+ client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenient()).execute(new LatchedActionListener<RefreshResponse>(newLatch(latches)));
+ } else if (rarely()) {
+ client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenient()).execute(new LatchedActionListener<FlushResponse>(newLatch(latches)));
+ } else if (rarely()) {
+ client().admin().indices().prepareOptimize(indices).setIndicesOptions(IndicesOptions.lenient()).setMaxNumSegments(between(1, 10)).setFlush(random.nextBoolean()).execute(new LatchedActionListener<OptimizeResponse>(newLatch(latches)));
+ }
+ }
+ }
+ } else {
+ logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, true);
+ BulkRequestBuilder bulkBuilder = client().prepareBulk();
+ for (IndexRequestBuilder indexRequestBuilder : builders) {
+ bulkBuilder.add(indexRequestBuilder);
+ }
+ BulkResponse actionGet = bulkBuilder.execute().actionGet();
+ assertThat(actionGet.hasFailures() ? actionGet.buildFailureMessage() : "", actionGet.hasFailures(), equalTo(false));
+ }
+ for (CountDownLatch countDownLatch : latches) {
+ countDownLatch.await();
+ }
+ final List<Throwable> actualErrors = new ArrayList<Throwable>();
+ for (Tuple<IndexRequestBuilder, Throwable> tuple : errors) {
+ if (ExceptionsHelper.unwrapCause(tuple.v2()) instanceof EsRejectedExecutionException) {
+ tuple.v1().execute().actionGet(); // re-index if rejected
+ } else {
+ actualErrors.add(tuple.v2());
+ }
+ }
+ assertThat(actualErrors, emptyIterable());
+ if (forceRefresh) {
+ assertNoFailures(client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenient()).execute().get());
+ }
+ }
+
+ private static CountDownLatch newLatch(List<CountDownLatch> latches) {
+ CountDownLatch l = new CountDownLatch(1);
+ latches.add(l);
+ return l;
+ }
+
+ private class LatchedActionListener<Response> implements ActionListener<Response> {
+ private final CountDownLatch latch;
+
+ public LatchedActionListener(CountDownLatch latch) {
+ this.latch = latch;
+ }
+
+ @Override
+ public final void onResponse(Response response) {
+ latch.countDown();
+ }
+
+ @Override
+ public final void onFailure(Throwable t) {
+ try {
+ logger.info("Action Failed", t);
+ addError(t);
+ } finally {
+ latch.countDown();
+ }
+ }
+
+ protected void addError(Throwable t) {
+ }
+
+ }
+
+ private class PayloadLatchedActionListener<Response, T> extends LatchedActionListener<Response> {
+ private final CopyOnWriteArrayList<Tuple<T, Throwable>> errors;
+ private final T builder;
+
+ public PayloadLatchedActionListener(T builder, CountDownLatch latch, CopyOnWriteArrayList<Tuple<T, Throwable>> errors) {
+ super(latch);
+ this.errors = errors;
+ this.builder = builder;
+ }
+
+ protected void addError(Throwable t) {
+ errors.add(new Tuple<T, Throwable>(builder, t));
+ }
+
+ }
+
+ /**
+ * Clears the given scroll Ids
+ */
+ public void clearScroll(String... scrollIds) {
+ ClearScrollResponse clearResponse = client().prepareClearScroll()
+ .setScrollIds(Arrays.asList(scrollIds)).get();
+ assertThat(clearResponse.isSucceeded(), equalTo(true));
+ }
+
+
+ /**
+ * The scope of a test cluster used together with
+ * {@link ClusterScope} annotations on {@link ElasticsearchIntegrationTest} subclasses.
+ */
+ public static enum Scope {
+ /**
+ * A globally shared cluster. This cluster doesn't allow modification of transient or persistent
+ * cluster settings.
+ */
+ GLOBAL,
+ /**
+ * A cluster shared across all methods in a single test suite
+ */
+ SUITE,
+ /**
+ * A cluster exclusive to a single test
+ */
+ TEST
+ }
+
+ private ClusterScope getAnnotation(Class<?> clazz) {
+ if (clazz == Object.class || clazz == ElasticsearchIntegrationTest.class) {
+ return null;
+ }
+ ClusterScope annotation = clazz.getAnnotation(ClusterScope.class);
+ if (annotation != null) {
+ return annotation;
+ }
+ return getAnnotation(clazz.getSuperclass());
+ }
+
+ private Scope getCurrentClusterScope() {
+ ClusterScope annotation = getAnnotation(this.getClass());
+ // if we are not annotated assume global!
+ return annotation == null ? Scope.GLOBAL : annotation.scope();
+ }
+
+ private int getNumNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass());
+ return annotation == null ? -1 : annotation.numNodes();
+ }
+
+ private int getMinNumNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass());
+ return annotation == null ? TestCluster.DEFAULT_MIN_NUM_NODES : annotation.minNumNodes();
+ }
+
+ private int getMaxNumNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass());
+ return annotation == null ? TestCluster.DEFAULT_MAX_NUM_NODES : annotation.maxNumNodes();
+ }
+
+ /**
+ * This method is used to obtain settings for the <tt>Nth</tt> node in the cluster.
+ * Nodes in this cluster are associated with an ordinal number such that nodes can
+ * be started with specific configurations. This method might be called multiple
+ * times with the same ordinal and is expected to return the same value for each invocation.
+ * In other words, subclasses must ensure this method is idempotent.
+ */
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.EMPTY;
+ }
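+ // A hedged sketch: a subclass could derive a per-node setting from the ordinal,
+ // which stays idempotent per ordinal as required above ("node.test.ordinal" is a
+ // hypothetical setting key):
+ //
+ //   @Override
+ //   protected Settings nodeSettings(int nodeOrdinal) {
+ //       return ImmutableSettings.builder()
+ //               .put("node.test.ordinal", nodeOrdinal)
+ //               .build();
+ //   }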
+
+ private TestCluster buildTestCluster(Scope scope) {
+ long currentClusterSeed = randomLong();
+ int numNodes = getNumNodes();
+ NodeSettingsSource nodeSettingsSource;
+ if (numNodes > 0) {
+ NodeSettingsSource.Immutable.Builder nodesSettings = NodeSettingsSource.Immutable.builder();
+ for (int i = 0; i < numNodes; i++) {
+ nodesSettings.set(i, nodeSettings(i));
+ }
+ nodeSettingsSource = nodesSettings.build();
+ } else {
+ nodeSettingsSource = new NodeSettingsSource() {
+ @Override
+ public Settings settings(int nodeOrdinal) {
+ return nodeSettings(nodeOrdinal);
+ }
+ };
+ }
+
+ int minNumNodes, maxNumNodes;
+ if (numNodes >= 0) {
+ minNumNodes = maxNumNodes = numNodes;
+ } else {
+ minNumNodes = getMinNumNodes();
+ maxNumNodes = getMaxNumNodes();
+ }
+
+ return new TestCluster(currentClusterSeed, minNumNodes, maxNumNodes, clusterName(scope.name(), ElasticsearchTestCase.CHILD_VM_ID, currentClusterSeed), nodeSettingsSource);
+ }
+
+ /**
+ * Defines a cluster scope for a {@link ElasticsearchIntegrationTest} subclass.
+ * By default if no {@link ClusterScope} annotation is present {@link Scope#GLOBAL} is used
+ * together with randomly chosen settings like number of nodes etc.
+ */
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target({ElementType.TYPE})
+ public @interface ClusterScope {
+ /**
+ * Returns the scope. {@link Scope#GLOBAL} is default.
+ */
+ Scope scope() default Scope.GLOBAL;
+
+ /**
+ * Returns the number of nodes in the cluster. Default is <tt>-1</tt> which means
+ * a random number of nodes is used, where the minimum and maximum number of nodes
+ * are either the specified ones or the default ones if not specified.
+ */
+ int numNodes() default -1;
+
+ /**
+ * Returns the minimum number of nodes in the cluster. Default is {@link TestCluster#DEFAULT_MIN_NUM_NODES}.
+ * Ignored when {@link ClusterScope#numNodes()} is set.
+ */
+ int minNumNodes() default TestCluster.DEFAULT_MIN_NUM_NODES;
+
+ /**
+ * Returns the maximum number of nodes in the cluster. Default is {@link TestCluster#DEFAULT_MAX_NUM_NODES}.
+ * Ignored when {@link ClusterScope#numNodes()} is set.
+ */
+ int maxNumNodes() default TestCluster.DEFAULT_MAX_NUM_NODES;
+
+ /**
+ * Returns the transport client ratio. By default this returns <code>-1</code> which means a random
+ * ratio in the interval <code>[0..1]</code> is used.
+ */
+ double transportClientRatio() default -1;
+ }
+
+ /**
+ * Returns the client ratio configured via the {@value #TESTS_CLIENT_RATIO} system property.
+ */
+ private static double transportClientRatio() {
+ String property = System.getProperty(TESTS_CLIENT_RATIO);
+ if (property == null || property.isEmpty()) {
+ return Double.NaN;
+ }
+ return Double.parseDouble(property);
+ }
+
+ /**
+ * Returns the transport client ratio from the class level annotation or via
+ * {@link System#getProperty(String)} if available. If both are not available this will
+ * return a random ratio in the interval <tt>[0..1]</tt>
+ */
+ protected double getPerTestTransportClientRatio() {
+ final ClusterScope annotation = getAnnotation(this.getClass());
+ double perTestRatio = -1;
+ if (annotation != null) {
+ perTestRatio = annotation.transportClientRatio();
+ }
+ if (perTestRatio == -1) {
+ return Double.isNaN(TRANSPORT_CLIENT_RATIO) ? randomDouble() : TRANSPORT_CLIENT_RATIO;
+ }
+ assert perTestRatio >= 0.0 && perTestRatio <= 1.0;
+ return perTestRatio;
+ }
+
+ /**
+ * Returns a random numeric field data format from the choices of "array",
+ * "compressed", or "doc_values".
+ */
+ public static String randomNumericFieldDataFormat() {
+ return randomFrom(Arrays.asList("array", "compressed", "doc_values"));
+ }
+
+ /**
+ * Returns a random bytes field data format from the choices of
+ * "paged_bytes", "fst", or "doc_values".
+ */
+ public static String randomBytesFieldDataFormat() {
+ return randomFrom(Arrays.asList("paged_bytes", "fst", "doc_values"));
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchLuceneTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchLuceneTestCase.java
new file mode 100644
index 0000000..e9eebbe
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchLuceneTestCase.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.annotations.*;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
+
+
+/**
+ * Base test case for Lucene-based testing. This class should be used when low-level Lucene features are tested.
+ */
+@Listeners({
+ ReproduceInfoPrinter.class
+})
+@ThreadLeakFilters(defaultFilters = true, filters = {ElasticsearchThreadFilter.class})
+@ThreadLeakScope(Scope.NONE)
+@TimeoutSuite(millis = TimeUnits.HOUR)
+@SuppressCodecs("Lucene3x")
+public abstract class ElasticsearchLuceneTestCase extends LuceneTestCase {
+
+ private static final Codec DEFAULT_CODEC = Codec.getDefault();
+
+ /**
+ * Forcefully reset the default codec
+ */
+ public static void forceDefaultCodec() {
+ Codec.setDefault(DEFAULT_CODEC);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java
new file mode 100644
index 0000000..eee6a2c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java
@@ -0,0 +1,277 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.annotations.*;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.AbstractRandomizedTest;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.Version;
+import org.elasticsearch.cache.recycler.MockPageCacheRecycler;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.util.concurrent.EsAbortPolicy;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.test.junit.listeners.LoggingListener;
+import org.elasticsearch.test.store.MockDirectoryHelper;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.net.URI;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllFilesClosed;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSearchersClosed;
+
+/**
+ * Base test case for randomized unit testing with Elasticsearch.
+ */
+@ThreadLeakFilters(defaultFilters = true, filters = {ElasticsearchThreadFilter.class})
+@ThreadLeakScope(Scope.NONE)
+@TimeoutSuite(millis = 20 * TimeUnits.MINUTE) // timeout the suite after 20min and fail the test.
+@Listeners(LoggingListener.class)
+public abstract class ElasticsearchTestCase extends AbstractRandomizedTest {
+
+ private static Thread.UncaughtExceptionHandler defaultHandler;
+
+ protected final ESLogger logger = Loggers.getLogger(getClass());
+
+ public static final String CHILD_VM_ID = System.getProperty("junit4.childvm.id", "" + System.currentTimeMillis());
+
+ public static final String TESTS_SECURITY_MANAGER = System.getProperty("tests.security.manager");
+
+ public static final String JAVA_SECURITY_POLICY = System.getProperty("java.security.policy");
+
+ public static final boolean ASSERTIONS_ENABLED;
+ static {
+ boolean enabled = false;
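+ // editorial note, not in the original import: the assignment below is an
+ // intentional side effect; it only executes when the JVM runs with -ea,
+ // so 'enabled' ends up recording whether assertions are switched on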
+ assert enabled = true;
+ ASSERTIONS_ENABLED = enabled;
+ if (Boolean.parseBoolean(Strings.hasLength(TESTS_SECURITY_MANAGER) ? TESTS_SECURITY_MANAGER : "true") && JAVA_SECURITY_POLICY != null) {
+ System.setSecurityManager(new SecurityManager());
+ }
+
+ }
+
+ public static boolean awaitBusy(Predicate<?> breakPredicate) throws InterruptedException {
+ return awaitBusy(breakPredicate, 10, TimeUnit.SECONDS);
+ }
+
+ public static boolean awaitBusy(Predicate<?> breakPredicate, long maxWaitTime, TimeUnit unit) throws InterruptedException {
+ long maxTimeInMillis = TimeUnit.MILLISECONDS.convert(maxWaitTime, unit);
+ long iterations = Math.max(Math.round(Math.log10(maxTimeInMillis) / Math.log10(2)), 1);
+ long timeInMillis = 1;
+ long sum = 0;
+ for (int i = 0; i < iterations; i++) {
+ if (breakPredicate.apply(null)) {
+ return true;
+ }
+ sum += timeInMillis;
+ Thread.sleep(timeInMillis);
+ timeInMillis *= 2;
+ }
+ timeInMillis = maxTimeInMillis - sum;
+ Thread.sleep(Math.max(timeInMillis, 0));
+ return breakPredicate.apply(null);
+ }
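+
+ // Illustrative usage sketch (not part of the original import; the break
+ // condition is hypothetical): poll with exponential back-off until a
+ // predicate holds or the timeout elapses, e.g.
+ //
+ //   boolean ready = awaitBusy(new Predicate<Object>() {
+ //       @Override
+ //       public boolean apply(Object input) {
+ //           return cluster().size() >= 2; // hypothetical condition
+ //       }
+ //   }, 30, TimeUnit.SECONDS);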
+
+ private static final String[] numericTypes = new String[]{"byte", "short", "integer", "long"};
+
+ public static String randomNumericType(Random random) {
+ return numericTypes[random.nextInt(numericTypes.length)];
+ }
+
+ /**
+ * Returns a {@link File} pointing to the class path relative resource given
+ * as the first argument. In contrast to
+ * <code>getClass().getResource(...).getFile()</code> this method will not
+ * return URL encoded paths if the parent path contains spaces or other
+ * non-standard characters.
+ */
+ public File getResource(String relativePath) {
+ URI uri = URI.create(getClass().getResource(relativePath).toString());
+ return new File(uri);
+ }
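+
+ // Illustrative sketch (the resource path is hypothetical): resolves a
+ // classpath resource to a File even when parent directories contain spaces,
+ // e.g.
+ //   File json = getResource("/org/elasticsearch/test/sample.json");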
+
+ @After
+ public void ensureAllPagesReleased() {
+ MockPageCacheRecycler.ensureAllPagesAreReleased();
+ }
+
+ public static boolean hasUnclosedWrapper() {
+ for (MockDirectoryWrapper w : MockDirectoryHelper.wrappers) {
+ if (w.isOpen()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @BeforeClass
+ public static void registerMockDirectoryHooks() throws Exception {
+ closeAfterSuite(new Closeable() {
+ @Override
+ public void close() throws IOException {
+ assertAllFilesClosed();
+ }
+ });
+
+ closeAfterSuite(new Closeable() {
+ @Override
+ public void close() throws IOException {
+ assertAllSearchersClosed();
+ }
+ });
+ defaultHandler = Thread.getDefaultUncaughtExceptionHandler();
+ Thread.setDefaultUncaughtExceptionHandler(new ElasticsearchUncaughtExceptionHandler(defaultHandler));
+ }
+
+ @AfterClass
+ public static void resetUncaughtExceptionHandler() {
+ Thread.setDefaultUncaughtExceptionHandler(defaultHandler);
+ }
+
+ public static boolean maybeDocValues() {
+ return LuceneTestCase.defaultCodecSupportsSortedSet() && randomBoolean();
+ }
+
+ private static final List<Version> SORTED_VERSIONS;
+
+ static {
+ Field[] declaredFields = Version.class.getDeclaredFields();
+ Set<Integer> ids = new HashSet<Integer>();
+ for (Field field : declaredFields) {
+ final int mod = field.getModifiers();
+ if (Modifier.isStatic(mod) && Modifier.isFinal(mod) && Modifier.isPublic(mod)) {
+ if (field.getType() == Version.class) {
+ try {
+ Version object = (Version) field.get(null);
+ ids.add(object.id);
+ } catch (Throwable e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+ }
+ List<Integer> idList = new ArrayList<Integer>(ids);
+ Collections.sort(idList);
+ Collections.reverse(idList);
+ ImmutableList.Builder<Version> version = ImmutableList.builder();
+ for (Integer integer : idList) {
+ version.add(Version.fromId(integer));
+ }
+ SORTED_VERSIONS = version.build();
+ }
+
+ public static Version getPreviousVersion() {
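+ // note (added for clarity): SORTED_VERSIONS is ordered newest-first, so
+ // index 0 is the current version and index 1 the previous release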
+ Version version = SORTED_VERSIONS.get(1);
+ assert version.before(Version.CURRENT);
+ return version;
+ }
+
+ public static Version randomVersion() {
+ return randomVersion(getRandom());
+ }
+
+ public static Version randomVersion(Random random) {
+ return SORTED_VERSIONS.get(random.nextInt(SORTED_VERSIONS.size()));
+ }
+
+ static final class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler {
+
+ private final Thread.UncaughtExceptionHandler parent;
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ private ElasticsearchUncaughtExceptionHandler(Thread.UncaughtExceptionHandler parent) {
+ this.parent = parent;
+ }
+
+
+ @Override
+ public void uncaughtException(Thread t, Throwable e) {
+ if (e instanceof EsRejectedExecutionException) {
+ if (e.getMessage().contains(EsAbortPolicy.SHUTTING_DOWN_KEY)) {
+ return; // ignore the EsRejectedExecutionException when a node shuts down
+ }
+ } else if (e instanceof OutOfMemoryError) {
+ if (e.getMessage().contains("unable to create new native thread")) {
+ printStackDump(logger);
+ }
+ }
+ parent.uncaughtException(t, e);
+ }
+
+ }
+
+ protected static final void printStackDump(ESLogger logger) {
+ // print stack traces if we can't create any native thread anymore
+ Map<Thread, StackTraceElement[]> allStackTraces = Thread.getAllStackTraces();
+ logger.error(formatThreadStacks(allStackTraces));
+ }
+
+ /**
+ * Dump threads and their current stack trace.
+ */
+ private static String formatThreadStacks(Map<Thread, StackTraceElement[]> threads) {
+ StringBuilder message = new StringBuilder();
+ int cnt = 1;
+ final Formatter f = new Formatter(message, Locale.ENGLISH);
+ for (Map.Entry<Thread, StackTraceElement[]> e : threads.entrySet()) {
+ if (e.getKey().isAlive())
+ f.format(Locale.ENGLISH, "\n %2d) %s", cnt++, threadName(e.getKey())).flush();
+ if (e.getValue().length == 0) {
+ message.append("\n at (empty stack)");
+ } else {
+ for (StackTraceElement ste : e.getValue()) {
+ message.append("\n at ").append(ste);
+ }
+ }
+ }
+ return message.toString();
+ }
+
+ private static String threadName(Thread t) {
+ return "Thread[" +
+ "id=" + t.getId() +
+ ", name=" + t.getName() +
+ ", state=" + t.getState() +
+ ", group=" + groupName(t.getThreadGroup()) +
+ "]";
+ }
+
+ private static String groupName(ThreadGroup threadGroup) {
+ if (threadGroup == null) {
+ return "{null group}";
+ } else {
+ return threadGroup.getName();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchThreadFilter.java b/src/test/java/org/elasticsearch/test/ElasticsearchThreadFilter.java
new file mode 100644
index 0000000..bd30e42
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchThreadFilter.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.ThreadFilter;
+
+/**
+ * Simple thread filter for randomized runner
+ */
+public final class ElasticsearchThreadFilter implements ThreadFilter {
+ @Override
+ public boolean reject(Thread t) {
+ return true;
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java
new file mode 100644
index 0000000..d42093c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.annotations.Listeners;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.Version;
+import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
+
+@Listeners({
+ ReproduceInfoPrinter.class
+})
+@ThreadLeakFilters(defaultFilters = true, filters = {ElasticsearchThreadFilter.class})
+@ThreadLeakScope(Scope.NONE)
+@TimeoutSuite(millis = TimeUnits.HOUR)
+
+/**
+ * Basic test case for token streams. The assertion methods in this class
+ * run basic checks to enforce correct behavior of the token streams.
+ */
+public abstract class ElasticsearchTokenStreamTestCase extends BaseTokenStreamTestCase {
+
+ public static Version randomVersion() {
+ return ElasticsearchTestCase.randomVersion(random());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/NodeSettingsSource.java b/src/test/java/org/elasticsearch/test/NodeSettingsSource.java
new file mode 100644
index 0000000..b128afc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/NodeSettingsSource.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Map;
+
+abstract class NodeSettingsSource {
+
+ public static final NodeSettingsSource EMPTY = new NodeSettingsSource() {
+ @Override
+ public Settings settings(int nodeOrdinal) {
+ return null;
+ }
+ };
+
+ /**
+ * @return the settings for the node represented by the given ordinal, or {@code null} if no settings are defined (in which
+ * case random settings will be generated for the node)
+ */
+ public abstract Settings settings(int nodeOrdinal);
+
+ public static class Immutable extends NodeSettingsSource {
+
+ private final Map<Integer, Settings> settingsPerNode;
+
+ private Immutable(Map<Integer, Settings> settingsPerNode) {
+ this.settingsPerNode = settingsPerNode;
+ }
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ @Override
+ public Settings settings(int nodeOrdinal) {
+ return settingsPerNode.get(nodeOrdinal);
+ }
+
+ public static class Builder {
+
+ private final ImmutableMap.Builder<Integer, Settings> settingsPerNode = ImmutableMap.builder();
+
+ private Builder() {
+ }
+
+ public Builder set(int ordinal, Settings settings) {
+ settingsPerNode.put(ordinal, settings);
+ return this;
+ }
+
+ public Immutable build() {
+ return new Immutable(settingsPerNode.build());
+ }
+
+ }
+ }
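+
+ // Illustrative usage sketch (not part of the original import; the setting is
+ // hypothetical): per-node settings can be pinned via the builder, e.g.
+ //   NodeSettingsSource source = Immutable.builder()
+ //       .set(0, settingsBuilder().put("node.master", true).build())
+ //       .build();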
+}
diff --git a/src/test/java/org/elasticsearch/test/TestCluster.java b/src/test/java/org/elasticsearch/test/TestCluster.java
new file mode 100644
index 0000000..2ca2364
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/TestCluster.java
@@ -0,0 +1,1300 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.hppc.ObjectArrayList;
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Sets;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.cache.recycler.CacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecyclerModule;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.network.NetworkUtils;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.ImmutableSettings.Builder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.engine.IndexEngineModule;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.merge.policy.*;
+import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerModule;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
+import org.elasticsearch.index.merge.scheduler.SerialMergeSchedulerProvider;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.indices.IndexTemplateMissingException;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.internal.InternalNode;
+import org.elasticsearch.repositories.RepositoryMissingException;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.test.cache.recycler.MockPageCacheRecyclerModule;
+import org.elasticsearch.test.engine.MockEngineModule;
+import org.elasticsearch.test.store.MockFSIndexStoreModule;
+import org.elasticsearch.test.transport.AssertingLocalTransportModule;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportModule;
+import org.elasticsearch.transport.TransportService;
+import org.junit.Assert;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean;
+import static com.google.common.collect.Maps.newTreeMap;
+import static org.apache.lucene.util.LuceneTestCase.rarely;
+import static org.apache.lucene.util.LuceneTestCase.usually;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+
+/**
+ * TestCluster manages a set of JVM private nodes and allows convenient access to them.
+ * The cluster supports randomized configuration such that nodes started in the cluster will
+ * automatically load asserting services tracking resources like file handles or open searchers.
+ * <p>
+ * The Cluster is bound to a test lifecycle where tests must call {@link #beforeTest(java.util.Random, double)} and
+ * {@link #afterTest()} to initialize and reset the cluster in order to be more reproducible. The term "more" relates
+ * to the async nature of Elasticsearch in combination with randomized testing. Once threads and asynchronous calls
+ * are involved, reproducibility is very limited. This class should only be used through {@link ElasticsearchIntegrationTest}.
+ * </p>
+ */
+public final class TestCluster implements Iterable<Client> {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ /**
+ * A boolean value to enable or disable mock modules. This is useful for testing
+ * the system without the asserting mock modules, to make sure they don't hide
+ * any bugs that would show up in production.
+ *
+ * @see ElasticsearchIntegrationTest
+ */
+ public static final String TESTS_ENABLE_MOCK_MODULES = "tests.enable_mock_modules";
+
+ /**
+ * A node level setting that holds a per node random seed that is consistent across node restarts
+ */
+ public static final String SETTING_CLUSTER_NODE_SEED = "test.cluster.node.seed";
+
+ /**
+ * Key used to retrieve the index random seed from the index settings on a running node.
+ * The value of this seed can be used to initialize a random context for a specific index.
+ * It's set once per test via a generic index template.
+ */
+ public static final String SETTING_INDEX_SEED = "index.tests.seed";
+
+ private static final String CLUSTER_NAME_KEY = "cluster.name";
+
+ private static final boolean ENABLE_MOCK_MODULES = systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true);
+
+ static final int DEFAULT_MIN_NUM_NODES = 2;
+ static final int DEFAULT_MAX_NUM_NODES = 6;
+
+ /* sorted map to make traverse order reproducible */
+ private final TreeMap<String, NodeAndClient> nodes = newTreeMap();
+
+ private final Set<File> dataDirToClean = new HashSet<File>();
+
+ private final String clusterName;
+
+ private final AtomicBoolean open = new AtomicBoolean(true);
+
+ private final Settings defaultSettings;
+
+ private Random random;
+
+ private AtomicInteger nextNodeId = new AtomicInteger(0);
+
+ /* Each shared node has a node seed that is used to start up the node and get default settings.
+ * This is important if a node is randomly shut down in a test since the next test relies on a
+ * fully shared cluster to be more reproducible */
+ private final long[] sharedNodesSeeds;
+
+ private double transportClientRatio = 0.0;
+
+ private final NodeSettingsSource nodeSettingsSource;
+
+ public TestCluster(long clusterSeed, String clusterName) {
+ this(clusterSeed, DEFAULT_MIN_NUM_NODES, DEFAULT_MAX_NUM_NODES, clusterName, NodeSettingsSource.EMPTY);
+ }
+
+ public TestCluster(long clusterSeed, int minNumNodes, int maxNumNodes, String clusterName) {
+ this(clusterSeed, minNumNodes, maxNumNodes, clusterName, NodeSettingsSource.EMPTY);
+ }
+
+ public TestCluster(long clusterSeed, int minNumNodes, int maxNumNodes, String clusterName, NodeSettingsSource nodeSettingsSource) {
+ this.clusterName = clusterName;
+
+ if (minNumNodes < 0 || maxNumNodes < 0) {
+ throw new IllegalArgumentException("minimum and maximum number of nodes must be >= 0");
+ }
+
+ if (maxNumNodes < minNumNodes) {
+ throw new IllegalArgumentException("maximum number of nodes must be >= minimum number of nodes");
+ }
+
+ Random random = new Random(clusterSeed);
+
+ int numSharedNodes;
+ if (minNumNodes == maxNumNodes) {
+ numSharedNodes = minNumNodes;
+ } else {
+ numSharedNodes = minNumNodes + random.nextInt(maxNumNodes - minNumNodes);
+ }
+
+ assert numSharedNodes >= 0;
+ /*
+ * TODO
+ * - we might want to start some master-only nodes?
+ * - we could add a flag that returns a client to the master all the time?
+ * - we could add a flag that never returns a client to the master
+ * - along those lines use a dedicated node that is master eligible and let all other nodes be only data nodes
+ */
+ sharedNodesSeeds = new long[numSharedNodes];
+ for (int i = 0; i < sharedNodesSeeds.length; i++) {
+ sharedNodesSeeds[i] = random.nextLong();
+ }
+ logger.info("Setup TestCluster [{}] with seed [{}] using [{}] nodes", clusterName, SeedUtils.formatSeed(clusterSeed), numSharedNodes);
+ this.nodeSettingsSource = nodeSettingsSource;
+ Builder builder = ImmutableSettings.settingsBuilder();
+ // randomize (multi/single) data path, special case for 0, don't set it at all...
+ int numOfDataPaths = random.nextInt(5);
+ if (numOfDataPaths > 0) {
+ StringBuilder dataPath = new StringBuilder();
+ for (int i = 0; i < numOfDataPaths; i++) {
+ dataPath.append("data/d").append(i).append(',');
+ }
+ builder.put("path.data", dataPath.toString());
+ }
+ defaultSettings = builder.build();
+
+ }
+
+ public String getClusterName() {
+ return clusterName;
+ }
+
+ private static boolean isLocalTransportConfigured() {
+ if ("local".equals(System.getProperty("es.node.mode", "network"))) {
+ return true;
+ }
+ return Boolean.parseBoolean(System.getProperty("es.node.local", "false"));
+ }
+
+ private Settings getSettings(int nodeOrdinal, long nodeSeed, Settings others) {
+ Builder builder = ImmutableSettings.settingsBuilder().put(defaultSettings)
+ .put(getRandomNodeSettings(nodeSeed));
+ Settings settings = nodeSettingsSource.settings(nodeOrdinal);
+ if (settings != null) {
+ if (settings.get(CLUSTER_NAME_KEY) != null) {
+ throw new ElasticsearchIllegalStateException("Tests must not set a '" + CLUSTER_NAME_KEY + "' as a node setting set '" + CLUSTER_NAME_KEY + "': [" + settings.get(CLUSTER_NAME_KEY) + "]");
+ }
+ builder.put(settings);
+ }
+ if (others != null) {
+ builder.put(others);
+ }
+ builder.put(CLUSTER_NAME_KEY, clusterName);
+ return builder.build();
+ }
+
+ private static Settings getRandomNodeSettings(long seed) {
+ Random random = new Random(seed);
+ Builder builder = ImmutableSettings.settingsBuilder()
+ /* use RAM directories in 10% of the runs */
+ //.put("index.store.type", random.nextInt(10) == 0 ? MockRamIndexStoreModule.class.getName() : MockFSIndexStoreModule.class.getName())
+ // decrease the routing schedule so new nodes will be added quickly - some random value between 30 and 80 ms
+ .put("cluster.routing.schedule", (30 + random.nextInt(50)) + "ms")
+ // default to non gateway
+ .put("gateway.type", "none")
+ .put(SETTING_CLUSTER_NODE_SEED, seed);
+ if (ENABLE_MOCK_MODULES && usually(random)) {
+ builder.put("index.store.type", MockFSIndexStoreModule.class.getName()); // no RAM dir for now!
+ builder.put(IndexEngineModule.EngineSettings.ENGINE_TYPE, MockEngineModule.class.getName());
+ builder.put(PageCacheRecyclerModule.CACHE_IMPL, MockPageCacheRecyclerModule.class.getName());
+ }
+ if (isLocalTransportConfigured()) {
+ builder.put(TransportModule.TRANSPORT_TYPE_KEY, AssertingLocalTransportModule.class.getName());
+ } else {
+ builder.put(Transport.TransportSettings.TRANSPORT_TCP_COMPRESS, rarely(random));
+ }
+ builder.put("type", RandomPicks.randomFrom(random, CacheRecycler.Type.values()));
+ if (random.nextBoolean()) {
+ builder.put("cache.recycler.page.type", RandomPicks.randomFrom(random, CacheRecycler.Type.values()));
+ }
+ if (random.nextInt(10) == 0) { // 10% of the nodes have a very frequent check interval
+ builder.put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueMillis(10 + random.nextInt(2000)));
+ } else if (random.nextInt(10) != 0) { // 90% of the time - 10% of the time we don't set anything
+ builder.put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueSeconds(10 + random.nextInt(5 * 60)));
+ }
+ if (random.nextBoolean()) { // sometimes set a default keep-alive
+ builder.put(SearchService.DEFAUTL_KEEPALIVE_KEY, TimeValue.timeValueSeconds(100 + random.nextInt(5*60)));
+ }
+ if (random.nextBoolean()) {
+ // change threadpool types to make sure we don't have components that rely on the type of thread pools
+ for (String name : Arrays.asList(ThreadPool.Names.BULK, ThreadPool.Names.FLUSH, ThreadPool.Names.GET,
+ ThreadPool.Names.INDEX, ThreadPool.Names.MANAGEMENT, ThreadPool.Names.MERGE, ThreadPool.Names.OPTIMIZE,
+ ThreadPool.Names.PERCOLATE, ThreadPool.Names.REFRESH, ThreadPool.Names.SEARCH, ThreadPool.Names.SNAPSHOT,
+ ThreadPool.Names.SUGGEST, ThreadPool.Names.WARMER)) {
+ if (random.nextBoolean()) {
+ final String type = RandomPicks.randomFrom(random, Arrays.asList("fixed", "cached", "scaling"));
+ builder.put(ThreadPool.THREADPOOL_GROUP + name + ".type", type);
+ }
+ }
+ }
+ return builder.build();
+ }
+
+ public static String clusterName(String prefix, String childVMId, long clusterSeed) {
+ StringBuilder builder = new StringBuilder(prefix);
+ builder.append('-').append(NetworkUtils.getLocalAddress().getHostName());
+ builder.append("-CHILD_VM=[").append(childVMId).append(']');
+ builder.append("-CLUSTER_SEED=[").append(clusterSeed).append(']');
+ // if multiple maven tasks run on a single host we'd better have an identifier that doesn't rely on input params
+ builder.append("-HASH=[").append(SeedUtils.formatSeed(System.nanoTime())).append(']');
+ return builder.toString();
+ }
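+
+ // Example of a generated name (values are illustrative only):
+ //   "shared-myhost-CHILD_VM=[1]-CLUSTER_SEED=[DEADBEEF]-HASH=[CAFE]"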
+
+ private void ensureOpen() {
+ if (!open.get()) {
+ throw new RuntimeException("Cluster is already closed");
+ }
+ }
+
+ private synchronized NodeAndClient getOrBuildRandomNode() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient();
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient;
+ }
+ NodeAndClient buildNode = buildNode();
+ buildNode.node().start();
+ publishNode(buildNode);
+ return buildNode;
+ }
+
+ private synchronized NodeAndClient getRandomNodeAndClient() {
+ Predicate<NodeAndClient> all = Predicates.alwaysTrue();
+ return getRandomNodeAndClient(all);
+ }
+
+
+ private synchronized NodeAndClient getRandomNodeAndClient(Predicate<NodeAndClient> predicate) {
+ ensureOpen();
+ Collection<NodeAndClient> values = Collections2.filter(nodes.values(), predicate);
+ if (!values.isEmpty()) {
+ int whichOne = random.nextInt(values.size());
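+ // walk the filtered view to the randomly picked element, since the view offers no positional access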
+ for (NodeAndClient nodeAndClient : values) {
+ if (whichOne-- == 0) {
+ return nodeAndClient;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Ensures that at least <code>n</code> nodes are present in the cluster.
+ * If more than <code>n</code> nodes are present, this method will not
+ * stop any of the running nodes.
+ */
+ public synchronized void ensureAtLeastNumNodes(int n) {
+ int size = nodes.size();
+ for (int i = size; i < n; i++) {
+ logger.info("increasing cluster size from {} to {}", size, n);
+ NodeAndClient buildNode = buildNode();
+ buildNode.node().start();
+ publishNode(buildNode);
+ }
+ }
+
+ /**
+ * Ensures that at most <code>n</code> nodes are up and running.
+ * If fewer than <code>n</code> nodes are running, this method
+ * will not start any additional nodes.
+ */
+ public synchronized void ensureAtMostNumNodes(int n) {
+ if (nodes.size() <= n) {
+ return;
+ }
+ // prevent killing the master if possible
+ final Iterator<NodeAndClient> values = n == 0 ? nodes.values().iterator() : Iterators.filter(nodes.values().iterator(), Predicates.not(new MasterNodePredicate(getMasterName())));
+ final Iterator<NodeAndClient> limit = Iterators.limit(values, nodes.size() - n);
+ logger.info("reducing cluster size from {} to {}", nodes.size() - n, n);
+ Set<NodeAndClient> nodesToRemove = new HashSet<NodeAndClient>();
+ while (limit.hasNext()) {
+ NodeAndClient next = limit.next();
+ nodesToRemove.add(next);
+ next.close();
+ }
+ for (NodeAndClient toRemove : nodesToRemove) {
+ nodes.remove(toRemove.name);
+ }
+ }
+
+ private NodeAndClient buildNode(Settings settings) {
+ int ord = nextNodeId.getAndIncrement();
+ return buildNode(ord, random.nextLong(), settings);
+ }
+
+ private NodeAndClient buildNode() {
+ int ord = nextNodeId.getAndIncrement();
+ return buildNode(ord, random.nextLong(), null);
+ }
+
+ private NodeAndClient buildNode(int nodeId, long seed, Settings settings) {
+ ensureOpen();
+ settings = getSettings(nodeId, seed, settings);
+ String name = buildNodeName(nodeId);
+ assert !nodes.containsKey(name);
+ Settings finalSettings = settingsBuilder()
+ .put(settings)
+ .put("name", name)
+ .put("discovery.id.seed", seed)
+ .build();
+ Node node = nodeBuilder().settings(finalSettings).build();
+ return new NodeAndClient(name, node, new RandomClientFactory());
+ }
+
+ private String buildNodeName(int id) {
+ return "node_" + id;
+ }
+
+ public synchronized Client client() {
+ ensureOpen();
+ /* Randomly return a client to one of the nodes in the cluster */
+ return getOrBuildRandomNode().client(random);
+ }
+
+ /**
+ * Returns a node client to the current master node.
+ * Note: use this with care; tests should not rely on a specific node's client.
+ */
+ public synchronized Client masterClient() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new MasterNodePredicate(getMasterName()));
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.nodeClient(); // ensure node client master is requested
+ }
+ Assert.fail("No master client found");
+ return null; // can't happen
+ }
+
+ /**
+ * Returns a node client to random node but not the master. This method will fail if no non-master client is available.
+ */
+ public synchronized Client nonMasterClient() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(Predicates.not(new MasterNodePredicate(getMasterName())));
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.nodeClient(); // ensure node client non-master is requested
+ }
+ Assert.fail("No non-master client found");
+ return null; // can't happen
+ }
+
+ /**
+ * Returns a client to a node started with "node.client: true"
+ */
+ public synchronized Client clientNodeClient() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new ClientNodePredicate());
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.client(random);
+ }
+ startNodeClient(ImmutableSettings.EMPTY);
+ return getRandomNodeAndClient(new ClientNodePredicate()).client(random);
+ }
+
+ /**
+ * Returns a transport client
+ */
+ public synchronized Client transportClient() {
+ ensureOpen();
+ // randomly return a transport client going to one of the nodes in the cluster
+ return getOrBuildRandomNode().transportClient();
+ }
+
+ /**
+ * Returns a node client to a given node.
+ */
+ public synchronized Client client(String nodeName) {
+ ensureOpen();
+ NodeAndClient nodeAndClient = nodes.get(nodeName);
+ if (nodeAndClient != null) {
+ return nodeAndClient.client(random);
+ }
+ Assert.fail("No node found with name: [" + nodeName + "]");
+ return null; // can't happen
+ }
+
+
+ /**
+ * Returns a "smart" node client to a random node in the cluster
+ */
+ public synchronized Client smartClient() {
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient();
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.nodeClient();
+ }
+ Assert.fail("No smart client found");
+ return null; // can't happen
+ }
+
+ /**
+ * Returns a client to a random node that matches the given predicate.
+ * The predicate can filter nodes based on each node's settings.
+ * If all nodes are filtered out, this method returns <code>null</code>.
+ */
+ public synchronized Client client(final Predicate<Settings> filterPredicate) {
+ ensureOpen();
+ final NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new Predicate<NodeAndClient>() {
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return filterPredicate.apply(nodeAndClient.node.settings());
+ }
+ });
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.client(random);
+ }
+ return null;
+ }
+
+ public void close() {
+ ensureOpen();
+ if (this.open.compareAndSet(true, false)) {
+ IOUtils.closeWhileHandlingException(nodes.values());
+ nodes.clear();
+ }
+ }
+
+ private final class NodeAndClient implements Closeable {
+ private InternalNode node;
+ private Client client;
+ private Client nodeClient;
+ private Client transportClient;
+ private final AtomicBoolean closed = new AtomicBoolean(false);
+ private final ClientFactory clientFactory;
+ private final String name;
+
+ NodeAndClient(String name, Node node, ClientFactory factory) {
+ this.node = (InternalNode) node;
+ this.name = name;
+ this.clientFactory = factory;
+ }
+
+ Node node() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ return node;
+ }
+
+ Client client(Random random) {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ if (client != null) {
+ return client;
+ }
+ return client = clientFactory.client(node, clusterName, random);
+ }
+
+ Client nodeClient() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ if (nodeClient == null) {
+ Client maybeNodeClient = client(random);
+ if (maybeNodeClient instanceof NodeClient) {
+ nodeClient = maybeNodeClient;
+ } else {
+ nodeClient = node.client();
+ }
+ }
+ return nodeClient;
+ }
+
+ Client transportClient() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ if (transportClient == null) {
+ Client maybeTransportClient = client(random);
+ if (maybeTransportClient instanceof TransportClient) {
+ transportClient = maybeTransportClient;
+ } else {
+ transportClient = TransportClientFactory.NO_SNIFF_CLIENT_FACTORY.client(node, clusterName, random);
+ }
+ }
+ return transportClient;
+ }
+
+ void resetClient() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ if (client != null) {
+ client.close();
+ client = null;
+ }
+ if (nodeClient != null) {
+ nodeClient.close();
+ nodeClient = null;
+ }
+ if (transportClient != null) {
+ transportClient.close();
+ transportClient = null;
+ }
+ }
+
+ void restart(RestartCallback callback) throws Exception {
+ assert callback != null;
+ if (!node.isClosed()) {
+ node.close();
+ }
+ Settings newSettings = callback.onNodeStopped(name);
+ if (newSettings == null) {
+ newSettings = ImmutableSettings.EMPTY;
+ }
+ if (callback.clearData(name)) {
+ NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, node);
+ if (nodeEnv.hasNodeFile()) {
+ FileSystemUtils.deleteRecursively(nodeEnv.nodeDataLocations());
+ }
+ }
+ node = (InternalNode) nodeBuilder().settings(node.settings()).settings(newSettings).node();
+ resetClient();
+ }
+
+
+ @Override
+ public void close() {
+ closed.set(true);
+ if (client != null) {
+ client.close();
+ client = null;
+ }
+ if (nodeClient != null) {
+ nodeClient.close();
+ nodeClient = null;
+ }
+ node.close();
+
+ }
+ }
+
+ static class ClientFactory {
+
+ public Client client(Node node, String clusterName, Random random) {
+ return node.client();
+ }
+ }
+
+ static class TransportClientFactory extends ClientFactory {
+
+ private boolean sniff;
+ public static TransportClientFactory NO_SNIFF_CLIENT_FACTORY = new TransportClientFactory(false);
+ public static TransportClientFactory SNIFF_CLIENT_FACTORY = new TransportClientFactory(true);
+
+ private TransportClientFactory(boolean sniff) {
+ this.sniff = sniff;
+ }
+
+ @Override
+ public Client client(Node node, String clusterName, Random random) {
+ TransportAddress addr = ((InternalNode) node).injector().getInstance(TransportService.class).boundAddress().publishAddress();
+ TransportClient client = new TransportClient(settingsBuilder().put("client.transport.nodes_sampler_interval", "1s")
+ .put("name", "transport_client_" + node.settings().get("name"))
+ .put(CLUSTER_NAME_KEY, clusterName).put("client.transport.sniff", sniff).build());
+ client.addTransportAddress(addr);
+ return client;
+ }
+ }
+
+ class RandomClientFactory extends ClientFactory {
+
+ @Override
+ public Client client(Node node, String clusterName, Random random) {
+ double nextDouble = random.nextDouble();
+ if (nextDouble < transportClientRatio) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Using transport client for node [{}] sniff: [{}]", node.settings().get("name"), false);
+ }
+ /* no sniff client for now - doesn't work with all tests since it might throw NoNodeAvailableException if nodes are shut down.
+ * we first need support of transportClientRatio as annotations or so
+ */
+ return TransportClientFactory.NO_SNIFF_CLIENT_FACTORY.client(node, clusterName, random);
+ } else {
+ return node.client();
+ }
+ }
+ }
+
+ /**
+ * This method should be executed before each test to reset the cluster to its initial state.
+ */
+ public synchronized void beforeTest(Random random, double transportClientRatio) {
+ reset(random, true, transportClientRatio);
+ }
+
+ private synchronized void reset(Random random, boolean wipeData, double transportClientRatio) {
+ assert transportClientRatio >= 0.0 && transportClientRatio <= 1.0;
+ logger.debug("Reset test cluster with transport client ratio: [{}]", transportClientRatio);
+ this.transportClientRatio = transportClientRatio;
+ this.random = new Random(random.nextLong());
+ resetClients(); /* reset all clients - each test gets its own client based on the Random instance created above. */
+ if (wipeData) {
+ wipeDataDirectories();
+ }
+ if (nextNodeId.get() == sharedNodesSeeds.length && nodes.size() == sharedNodesSeeds.length) {
+ logger.debug("Cluster hasn't changed - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+ return;
+ }
+ logger.debug("Cluster is NOT consistent - restarting shared nodes - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+
+
+ Set<NodeAndClient> sharedNodes = new HashSet<NodeAndClient>();
+ boolean changed = false;
+ for (int i = 0; i < sharedNodesSeeds.length; i++) {
+ String buildNodeName = buildNodeName(i);
+ NodeAndClient nodeAndClient = nodes.get(buildNodeName);
+ if (nodeAndClient == null) {
+ changed = true;
+ nodeAndClient = buildNode(i, sharedNodesSeeds[i], null);
+ nodeAndClient.node.start();
+ logger.info("Start Shared Node [{}] not shared", nodeAndClient.name);
+ }
+ sharedNodes.add(nodeAndClient);
+ }
+ if (!changed && sharedNodes.size() == nodes.size()) {
+ logger.debug("Cluster is consistent - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+ if (size() > 0) {
+ client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get();
+ }
+ return; // we are consistent - return
+ }
+ for (NodeAndClient nodeAndClient : sharedNodes) {
+ nodes.remove(nodeAndClient.name);
+ }
+
+ // trash the remaining nodes
+ final Collection<NodeAndClient> toShutDown = nodes.values();
+ for (NodeAndClient nodeAndClient : toShutDown) {
+ logger.debug("Close Node [{}] not shared", nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ nodes.clear();
+ for (NodeAndClient nodeAndClient : sharedNodes) {
+ publishNode(nodeAndClient);
+ }
+ nextNodeId.set(sharedNodesSeeds.length);
+ assert size() == sharedNodesSeeds.length;
+ if (size() > 0) {
+ client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get();
+ }
+ logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+ }
+
+ public void wipe() {
+ wipeIndices("_all");
+ wipeTemplates();
+ wipeRepositories();
+ }
+
+ /**
+ * Deletes the given indices from the test cluster. At least one index name must
+ * be passed; use <code>"_all"</code> to remove all indices.
+ */
+ public void wipeIndices(String... indices) {
+ assert indices != null && indices.length > 0;
+ if (size() > 0) {
+ try {
+ assertAcked(client().admin().indices().prepareDelete(indices));
+ } catch (IndexMissingException e) {
+ // ignore
+ } catch (ElasticsearchIllegalArgumentException e) {
+ // Happens if `action.destructive_requires_name` is set to true
+ // which is the case in the CloseIndexDisableCloseAllTests
+ if ("_all".equals(indices[0])) {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ ObjectArrayList<String> concreteIndices = new ObjectArrayList<String>();
+ for (IndexMetaData indexMetaData : clusterStateResponse.getState().metaData()) {
+ concreteIndices.add(indexMetaData.getIndex());
+ }
+ if (!concreteIndices.isEmpty()) {
+ assertAcked(client().admin().indices().prepareDelete(concreteIndices.toArray(String.class)));
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Deletes index templates, supports wildcard notation.
+ * If no template name is passed to this method all templates are removed.
+ */
+ public void wipeTemplates(String... templates) {
+ if (size() > 0) {
+ // if nothing is provided, delete all
+ if (templates.length == 0) {
+ templates = new String[]{"*"};
+ }
+ for (String template : templates) {
+ try {
+ client().admin().indices().prepareDeleteTemplate(template).execute().actionGet();
+ } catch (IndexTemplateMissingException e) {
+ // ignore
+ }
+ }
+ }
+ }
+
+ /**
+ * Deletes repositories, supports wildcard notation.
+ */
+ public void wipeRepositories(String... repositories) {
+ if (size() > 0) {
+ // if nothing is provided, delete all
+ if (repositories.length == 0) {
+ repositories = new String[]{"*"};
+ }
+ for (String repository : repositories) {
+ try {
+ client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet();
+ } catch (RepositoryMissingException ex) {
+ // ignore
+ }
+ }
+ }
+ }
+
+ /**
+ * Creates a randomized index template. This template is used to pass in randomized settings on a
+ * per index basis.
+ */
+ public void randomIndexTemplate() {
+ // TODO move settings for random directory etc here into the index based randomized settings.
+ if (size() > 0) {
+ client().admin().indices().preparePutTemplate("random_index_template")
+ .setTemplate("*")
+ .setOrder(0)
+ .setSettings(setRandomNormsLoading(setRandomMerge(random, ImmutableSettings.builder())
+ .put(SETTING_INDEX_SEED, random.nextLong())))
+ .execute().actionGet();
+ }
+ }
+
+
+ private ImmutableSettings.Builder setRandomNormsLoading(ImmutableSettings.Builder builder) {
+ if (random.nextBoolean()) {
+ builder.put(SearchService.NORMS_LOADING_KEY, RandomPicks.randomFrom(random, Arrays.asList(FieldMapper.Loading.EAGER, FieldMapper.Loading.LAZY)));
+ }
+ return builder;
+ }
+
+ private static ImmutableSettings.Builder setRandomMerge(Random random, ImmutableSettings.Builder builder) {
+ if (random.nextBoolean()) {
+ builder.put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT,
+ random.nextBoolean() ? random.nextDouble() : random.nextBoolean());
+ }
+ Class<? extends MergePolicyProvider<?>> mergePolicy = TieredMergePolicyProvider.class;
+ switch (random.nextInt(5)) {
+ case 4:
+ mergePolicy = LogByteSizeMergePolicyProvider.class;
+ break;
+ case 3:
+ mergePolicy = LogDocMergePolicyProvider.class;
+ break;
+ case 0:
+ mergePolicy = null;
+ }
+ if (mergePolicy != null) {
+ builder.put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, mergePolicy.getName());
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(MergeSchedulerProvider.FORCE_ASYNC_MERGE, random.nextBoolean());
+ }
+ switch (random.nextInt(5)) {
+ case 4:
+ builder.put(MergeSchedulerModule.MERGE_SCHEDULER_TYPE_KEY, SerialMergeSchedulerProvider.class.getName());
+ break;
+ case 3:
+ builder.put(MergeSchedulerModule.MERGE_SCHEDULER_TYPE_KEY, ConcurrentMergeSchedulerProvider.class.getName());
+ break;
+ }
+
+ return builder;
+ }
+
+ /**
+ * This method should be executed during tearDown
+ */
+ public synchronized void afterTest() {
+ wipeDataDirectories();
+ resetClients(); /* reset all clients - each test gets its own client based on the Random instance created above. */
+ }
+
+ public void assertAfterTest() throws IOException {
+ assertAllSearchersClosed();
+ assertAllFilesClosed();
+ }
+
+ private void resetClients() {
+ final Collection<NodeAndClient> nodesAndClients = nodes.values();
+ for (NodeAndClient nodeAndClient : nodesAndClients) {
+ nodeAndClient.resetClient();
+ }
+ }
+
+ private void wipeDataDirectories() {
+ if (!dataDirToClean.isEmpty()) {
+ logger.info("Wipe data directory for all nodes locations: {}", this.dataDirToClean);
+ try {
+ FileSystemUtils.deleteRecursively(dataDirToClean.toArray(new File[dataDirToClean.size()]));
+ } finally {
+ this.dataDirToClean.clear();
+ }
+ }
+ }
+
+ /**
+ * Returns a reference to a random node's {@link ClusterService}.
+ */
+ public synchronized ClusterService clusterService() {
+ return getInstance(ClusterService.class);
+ }
+
+ /**
+ * Returns an Iterable over all instances of the given class &lt;T&gt; across all nodes in the cluster.
+ */
+ public synchronized <T> Iterable<T> getInstances(Class<T> clazz) {
+ List<T> instances = new ArrayList<T>(nodes.size());
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ instances.add(getInstanceFromNode(clazz, nodeAndClient.node));
+ }
+ return instances;
+ }
+
+ /**
+ * Returns a reference to the given node's instance of the given class &lt;T&gt;.
+ */
+ public synchronized <T> T getInstance(Class<T> clazz, final String node) {
+ final Predicate<TestCluster.NodeAndClient> predicate;
+ if (node != null) {
+ predicate = new Predicate<TestCluster.NodeAndClient>() {
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return node.equals(nodeAndClient.name);
+ }
+ };
+ } else {
+ predicate = Predicates.alwaysTrue();
+ }
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(predicate);
+ assert randomNodeAndClient != null;
+ return getInstanceFromNode(clazz, randomNodeAndClient.node);
+ }
+
+ /**
+ * Returns a reference to a random node's instance of the given class &lt;T&gt;.
+ */
+ public synchronized <T> T getInstance(Class<T> clazz) {
+ return getInstance(clazz, null);
+ }
+
+ private synchronized <T> T getInstanceFromNode(Class<T> clazz, InternalNode node) {
+ return node.injector().getInstance(clazz);
+ }
+
+ /**
+ * Returns the number of nodes in the cluster.
+ */
+ public synchronized int size() {
+ return this.nodes.size();
+ }
+
+ /**
+ * Stops a random node in the cluster.
+ */
+ public synchronized void stopRandomNode() {
+ ensureOpen();
+ NodeAndClient nodeAndClient = getRandomNodeAndClient();
+ if (nodeAndClient != null) {
+ logger.info("Closing random node [{}] ", nodeAndClient.name);
+ nodes.remove(nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ }
+
+ /**
+ * Stops a random node in the cluster that matches the given filter, or none
+ * if none of the nodes matches the filter.
+ */
+ public synchronized void stopRandomNode(final Predicate<Settings> filter) {
+ ensureOpen();
+ NodeAndClient nodeAndClient = getRandomNodeAndClient(new Predicate<TestCluster.NodeAndClient>() {
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return filter.apply(nodeAndClient.node.settings());
+ }
+ });
+ if (nodeAndClient != null) {
+ logger.info("Closing filtered random node [{}] ", nodeAndClient.name);
+ nodes.remove(nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ }
+
+
+ /**
+ * Stops the current master node forcefully
+ */
+ public synchronized void stopCurrentMasterNode() {
+ ensureOpen();
+ assert size() > 0;
+ String masterNodeName = getMasterName();
+ assert nodes.containsKey(masterNodeName);
+ logger.info("Closing master node [{}] ", masterNodeName);
+ NodeAndClient remove = nodes.remove(masterNodeName);
+ remove.close();
+ }
+
+ /**
+ * Stops any of the current nodes except the master node.
+ */
+ public void stopRandomNonMasterNode() {
+ NodeAndClient nodeAndClient = getRandomNodeAndClient(Predicates.not(new MasterNodePredicate(getMasterName())));
+ if (nodeAndClient != null) {
+ logger.info("Closing random non master node [{}] current master [{}] ", nodeAndClient.name, getMasterName());
+ nodes.remove(nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ }
+
+ /**
+ * Restarts a random node in the cluster
+ */
+ public void restartRandomNode() throws Exception {
+ restartRandomNode(EMPTY_CALLBACK);
+ }
+
+
+ /**
+ * Restarts a random node in the cluster and calls the callback during restart.
+ */
+ public void restartRandomNode(RestartCallback callback) throws Exception {
+ ensureOpen();
+ NodeAndClient nodeAndClient = getRandomNodeAndClient();
+ if (nodeAndClient != null) {
+ logger.info("Restarting random node [{}] ", nodeAndClient.name);
+ nodeAndClient.restart(callback);
+ }
+ }
+
+ private void restartAllNodes(boolean rollingRestart, RestartCallback callback) throws Exception {
+ ensureOpen();
+ List<NodeAndClient> toRemove = new ArrayList<TestCluster.NodeAndClient>();
+ try {
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ if (!callback.doRestart(nodeAndClient.name)) {
+ logger.info("Closing node [{}] during restart", nodeAndClient.name);
+ toRemove.add(nodeAndClient);
+ nodeAndClient.close();
+ }
+ }
+ } finally {
+ for (NodeAndClient nodeAndClient : toRemove) {
+ nodes.remove(nodeAndClient.name);
+ }
+ }
+ logger.info("Restarting remaining nodes rollingRestart [{}]", rollingRestart);
+ if (rollingRestart) {
+ int numNodesRestarted = 0;
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
+ logger.info("Restarting node [{}] ", nodeAndClient.name);
+ nodeAndClient.restart(callback);
+ }
+ } else {
+ int numNodesRestarted = 0;
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
+ logger.info("Stopping node [{}] ", nodeAndClient.name);
+ nodeAndClient.node.close();
+ }
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ logger.info("Starting node [{}] ", nodeAndClient.name);
+ nodeAndClient.restart(callback);
+ }
+ }
+ }
+
+
+ private static final RestartCallback EMPTY_CALLBACK = new RestartCallback() {
+ public Settings onNodeStopped(String node) {
+ return null;
+ }
+ };
+
+ /**
+ * Restarts all nodes in the cluster. It first stops all nodes and then restarts all the nodes again.
+ */
+ public void fullRestart() throws Exception {
+ fullRestart(EMPTY_CALLBACK);
+ }
+
+ /**
+ * Restarts all nodes in a rolling restart fashion, i.e. only restarts one node at a time.
+ */
+ public void rollingRestart() throws Exception {
+ rollingRestart(EMPTY_CALLBACK);
+ }
+
+ /**
+ * Restarts all nodes in a rolling restart fashion, i.e. only restarts one node at a time.
+ */
+ public void rollingRestart(RestartCallback function) throws Exception {
+ restartAllNodes(true, function);
+ }
+
+ /**
+ * Restarts all nodes in the cluster. It first stops all nodes and then restarts all the nodes again.
+ */
+ public void fullRestart(RestartCallback function) throws Exception {
+ restartAllNodes(false, function);
+ }
+
+
+ private String getMasterName() {
+ try {
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ return state.nodes().masterNode().name();
+ } catch (Throwable e) {
+ logger.warn("Can't fetch cluster state", e);
+ throw new RuntimeException("Can't get master node " + e.getMessage(), e);
+ }
+ }
+
+ synchronized Set<String> allButN(int numNodes) {
+ return nRandomNodes(size() - numNodes);
+ }
+
+ private synchronized Set<String> nRandomNodes(int numNodes) {
+ assert size() >= numNodes;
+ return Sets.newHashSet(Iterators.limit(this.nodes.keySet().iterator(), numNodes));
+ }
+
+ public synchronized void startNodeClient(Settings settings) {
+ ensureOpen(); // currently unused
+ startNode(settingsBuilder().put(settings).put("node.client", true));
+ }
+
+ /**
+ * Returns a set of nodes that have at least one shard of the given index.
+ */
+ public synchronized Set<String> nodesInclude(String index) {
+ if (clusterService().state().routingTable().hasIndex(index)) {
+ List<ShardRouting> allShards = clusterService().state().routingTable().allShards(index);
+ DiscoveryNodes discoveryNodes = clusterService().state().getNodes();
+ Set<String> nodes = new HashSet<String>();
+ for (ShardRouting shardRouting : allShards) {
+ if (shardRouting.assignedToNode()) {
+ DiscoveryNode discoveryNode = discoveryNodes.get(shardRouting.currentNodeId());
+ nodes.add(discoveryNode.getName());
+ }
+ }
+ return nodes;
+ }
+ return Collections.emptySet();
+ }
+
+ /**
+ * Starts a node with default settings and returns its name.
+ */
+ public String startNode() {
+ return startNode(ImmutableSettings.EMPTY);
+ }
+
+ /**
+ * Starts a node with the given settings builder and returns its name.
+ */
+ public String startNode(Settings.Builder settings) {
+ return startNode(settings.build());
+ }
+
+ /**
+ * Starts a node with the given settings and returns its name.
+ */
+ public String startNode(Settings settings) {
+ NodeAndClient buildNode = buildNode(settings);
+ buildNode.node().start();
+ publishNode(buildNode);
+ return buildNode.name;
+ }
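+
+ // Illustrative sketch (not part of the original import; the setting is
+ // hypothetical): tests typically start extra nodes like
+ //   String name = cluster.startNode(settingsBuilder().put("node.data", false));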
+
+ private void publishNode(NodeAndClient nodeAndClient) {
+ assert !nodeAndClient.node().isClosed();
+ NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, nodeAndClient.node);
+ if (nodeEnv.hasNodeFile()) {
+ dataDirToClean.addAll(Arrays.asList(nodeEnv.nodeDataLocations()));
+ }
+ nodes.put(nodeAndClient.name, nodeAndClient);
+
+ }
+
+ public void closeNonSharedNodes(boolean wipeData) {
+ reset(random, wipeData, transportClientRatio);
+ }
+
+
+ private static final class MasterNodePredicate implements Predicate<NodeAndClient> {
+ private final String masterNodeName;
+
+ public MasterNodePredicate(String masterNodeName) {
+ this.masterNodeName = masterNodeName;
+ }
+
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return masterNodeName.equals(nodeAndClient.name);
+ }
+ }
+
+ private static final class ClientNodePredicate implements Predicate<NodeAndClient> {
+
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return nodeAndClient.node.settings().getAsBoolean("node.client", false);
+ }
+ }
+
+ @Override
+ public synchronized Iterator<Client> iterator() {
+ ensureOpen();
+ final Iterator<NodeAndClient> iterator = nodes.values().iterator();
+ return new Iterator<Client>() {
+
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public Client next() {
+ return iterator.next().client(random);
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException("");
+ }
+
+ };
+ }
+
+ /**
+ * Returns a predicate that only accepts settings of nodes with one of the given names.
+ */
+ public static Predicate<Settings> nameFilter(String... nodeName) {
+ return new NodeNamePredicate(new HashSet<String>(Arrays.asList(nodeName)));
+ }
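+
+ // Illustrative sketch (node name is hypothetical): combine with
+ // client(Predicate) or stopRandomNode(Predicate), e.g.
+ //   cluster.stopRandomNode(TestCluster.nameFilter("node_0"));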
+
+ private static final class NodeNamePredicate implements Predicate<Settings> {
+ private final HashSet<String> nodeNames;
+
+
+ public NodeNamePredicate(HashSet<String> nodeNames) {
+ this.nodeNames = nodeNames;
+ }
+
+ @Override
+ public boolean apply(Settings settings) {
+ return nodeNames.contains(settings.get("name"));
+
+ }
+ }
+
+
+ /**
+ * An abstract class that is called during {@link #rollingRestart(org.elasticsearch.test.TestCluster.RestartCallback)}
+ * and / or {@link #fullRestart(org.elasticsearch.test.TestCluster.RestartCallback)} to execute actions at certain
+ * stages of the restart.
+ */
+ public static abstract class RestartCallback {
+
+ /**
+ * Executed once the node with the given name has been stopped.
+ */
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return ImmutableSettings.EMPTY;
+ }
+
+ /**
+ * Executed for each node before the <tt>n+1</tt>th node is restarted. The given client is
+ * an active client to the node that will be restarted next.
+ */
+ public void doAfterNodes(int n, Client client) throws Exception {
+ }
+
+ /**
+ * If this returns <code>true</code>, all data for the node with the given name will be cleared, including
+ * gateways and all index data. Returns <code>false</code> by default.
+ */
+ public boolean clearData(String nodeName) {
+ return false;
+ }
+
+
+ /**
+ * If this returns <code>false</code>, the node with the given name will not be restarted. It will be
+ * closed and removed from the cluster. Returns <code>true</code> by default.
+ */
+ public boolean doRestart(String nodeName) {
+ return true;
+ }
+ }
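+
+ // A minimal sketch (illustrative; `cluster` stands for a TestCluster instance) of a
+ // callback that wipes all node data during a full restart; override only the hooks a test needs:
+ //
+ //   cluster.fullRestart(new RestartCallback() {
+ //       @Override
+ //       public boolean clearData(String nodeName) {
+ //           return true;
+ //       }
+ //   });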
+
+}
diff --git a/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecyclerModule.java b/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecyclerModule.java
new file mode 100644
index 0000000..3f5b2a4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecyclerModule.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.cache.recycler;
+
+import org.elasticsearch.cache.recycler.MockPageCacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.inject.AbstractModule;
+
+public class MockPageCacheRecyclerModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(PageCacheRecycler.class).to(MockPageCacheRecycler.class).asEagerSingleton();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/client/RandomizingClient.java b/src/test/java/org/elasticsearch/test/client/RandomizingClient.java
new file mode 100644
index 0000000..cbdf36e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/client/RandomizingClient.java
@@ -0,0 +1,431 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.client;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.count.CountRequest;
+import org.elasticsearch.action.count.CountRequestBuilder;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.delete.DeleteRequestBuilder;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder;
+import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
+import org.elasticsearch.action.explain.ExplainRequest;
+import org.elasticsearch.action.explain.ExplainRequestBuilder;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.get.*;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.mlt.MoreLikeThisRequest;
+import org.elasticsearch.action.mlt.MoreLikeThisRequestBuilder;
+import org.elasticsearch.action.percolate.*;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.action.suggest.SuggestRequest;
+import org.elasticsearch.action.suggest.SuggestRequestBuilder;
+import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.action.termvector.*;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Arrays;
+import java.util.Random;
+
+/**
+ * A {@link Client} that randomizes request parameters; currently it picks a random
+ * default {@link SearchType} that every search prepared through it will use.
+ */
+public class RandomizingClient implements InternalClient {
+
+ private final SearchType defaultSearchType;
+ private final InternalClient delegate;
+
+ public RandomizingClient(InternalClient client, Random random) {
+ this.delegate = client;
+ // we don't use the QUERY_AND_FETCH types because they break quite a lot of tests:
+ // they return `size * num_shards` hits instead of `size`
+ defaultSearchType = RandomPicks.randomFrom(random, Arrays.asList(
+ SearchType.DFS_QUERY_THEN_FETCH,
+ SearchType.QUERY_THEN_FETCH));
+ }
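+
+ // Usage sketch (illustrative; `internal` and `random` stand for the test's
+ // InternalClient and Random instances):
+ //
+ //   Client client = new RandomizingClient(internal, random);
+ //   client.prepareSearch("test").get(); // uses the randomized default search type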
+
+ @Override
+ public void close() {
+ delegate.close();
+ }
+
+ @Override
+ public AdminClient admin() {
+ return delegate.admin();
+ }
+
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
+ Action<Request, Response, RequestBuilder> action, Request request) {
+ return delegate.execute(action, request);
+ }
+
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
+ Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+ delegate.execute(action, request, listener);
+ }
+
+ @Override
+ public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
+ Action<Request, Response, RequestBuilder> action) {
+ return delegate.prepareExecute(action);
+ }
+
+ @Override
+ public ActionFuture<IndexResponse> index(IndexRequest request) {
+ return delegate.index(request);
+ }
+
+ @Override
+ public void index(IndexRequest request, ActionListener<IndexResponse> listener) {
+ delegate.index(request, listener);
+ }
+
+ @Override
+ public IndexRequestBuilder prepareIndex() {
+ return delegate.prepareIndex();
+ }
+
+ @Override
+ public ActionFuture<UpdateResponse> update(UpdateRequest request) {
+ return delegate.update(request);
+ }
+
+ @Override
+ public void update(UpdateRequest request, ActionListener<UpdateResponse> listener) {
+ delegate.update(request, listener);
+ }
+
+ @Override
+ public UpdateRequestBuilder prepareUpdate() {
+ return delegate.prepareUpdate();
+ }
+
+ @Override
+ public UpdateRequestBuilder prepareUpdate(String index, String type, String id) {
+ return delegate.prepareUpdate(index, type, id);
+ }
+
+ @Override
+ public IndexRequestBuilder prepareIndex(String index, String type) {
+ return delegate.prepareIndex(index, type);
+ }
+
+ @Override
+ public IndexRequestBuilder prepareIndex(String index, String type, String id) {
+ return delegate.prepareIndex(index, type, id);
+ }
+
+ @Override
+ public ActionFuture<DeleteResponse> delete(DeleteRequest request) {
+ return delegate.delete(request);
+ }
+
+ @Override
+ public void delete(DeleteRequest request, ActionListener<DeleteResponse> listener) {
+ delegate.delete(request, listener);
+ }
+
+ @Override
+ public DeleteRequestBuilder prepareDelete() {
+ return delegate.prepareDelete();
+ }
+
+ @Override
+ public DeleteRequestBuilder prepareDelete(String index, String type, String id) {
+ return delegate.prepareDelete(index, type, id);
+ }
+
+ @Override
+ public ActionFuture<BulkResponse> bulk(BulkRequest request) {
+ return delegate.bulk(request);
+ }
+
+ @Override
+ public void bulk(BulkRequest request, ActionListener<BulkResponse> listener) {
+ delegate.bulk(request, listener);
+ }
+
+ @Override
+ public BulkRequestBuilder prepareBulk() {
+ return delegate.prepareBulk();
+ }
+
+ @Override
+ public ActionFuture<DeleteByQueryResponse> deleteByQuery(DeleteByQueryRequest request) {
+ return delegate.deleteByQuery(request);
+ }
+
+ @Override
+ public void deleteByQuery(DeleteByQueryRequest request, ActionListener<DeleteByQueryResponse> listener) {
+ delegate.deleteByQuery(request, listener);
+ }
+
+ @Override
+ public DeleteByQueryRequestBuilder prepareDeleteByQuery(String... indices) {
+ return delegate.prepareDeleteByQuery(indices);
+ }
+
+ @Override
+ public ActionFuture<GetResponse> get(GetRequest request) {
+ return delegate.get(request);
+ }
+
+ @Override
+ public void get(GetRequest request, ActionListener<GetResponse> listener) {
+ delegate.get(request, listener);
+ }
+
+ @Override
+ public GetRequestBuilder prepareGet() {
+ return delegate.prepareGet();
+ }
+
+ @Override
+ public GetRequestBuilder prepareGet(String index, String type, String id) {
+ return delegate.prepareGet(index, type, id);
+ }
+
+ @Override
+ public ActionFuture<MultiGetResponse> multiGet(MultiGetRequest request) {
+ return delegate.multiGet(request);
+ }
+
+ @Override
+ public void multiGet(MultiGetRequest request, ActionListener<MultiGetResponse> listener) {
+ delegate.multiGet(request, listener);
+ }
+
+ @Override
+ public MultiGetRequestBuilder prepareMultiGet() {
+ return delegate.prepareMultiGet();
+ }
+
+ @Override
+ public ActionFuture<CountResponse> count(CountRequest request) {
+ return delegate.count(request);
+ }
+
+ @Override
+ public void count(CountRequest request, ActionListener<CountResponse> listener) {
+ delegate.count(request, listener);
+ }
+
+ @Override
+ public CountRequestBuilder prepareCount(String... indices) {
+ return delegate.prepareCount(indices);
+ }
+
+ @Override
+ public ActionFuture<SuggestResponse> suggest(SuggestRequest request) {
+ return delegate.suggest(request);
+ }
+
+ @Override
+ public void suggest(SuggestRequest request, ActionListener<SuggestResponse> listener) {
+ delegate.suggest(request, listener);
+ }
+
+ @Override
+ public SuggestRequestBuilder prepareSuggest(String... indices) {
+ return delegate.prepareSuggest(indices);
+ }
+
+ @Override
+ public ActionFuture<SearchResponse> search(SearchRequest request) {
+ return delegate.search(request);
+ }
+
+ @Override
+ public void search(SearchRequest request, ActionListener<SearchResponse> listener) {
+ delegate.search(request, listener);
+ }
+
+ @Override
+ public SearchRequestBuilder prepareSearch(String... indices) {
+ return delegate.prepareSearch(indices).setSearchType(defaultSearchType);
+ }
+
+ @Override
+ public ActionFuture<SearchResponse> searchScroll(SearchScrollRequest request) {
+ return delegate.searchScroll(request);
+ }
+
+ @Override
+ public void searchScroll(SearchScrollRequest request, ActionListener<SearchResponse> listener) {
+ delegate.searchScroll(request, listener);
+ }
+
+ @Override
+ public SearchScrollRequestBuilder prepareSearchScroll(String scrollId) {
+ return delegate.prepareSearchScroll(scrollId);
+ }
+
+ @Override
+ public ActionFuture<MultiSearchResponse> multiSearch(MultiSearchRequest request) {
+ return delegate.multiSearch(request);
+ }
+
+ @Override
+ public void multiSearch(MultiSearchRequest request, ActionListener<MultiSearchResponse> listener) {
+ delegate.multiSearch(request, listener);
+ }
+
+ @Override
+ public MultiSearchRequestBuilder prepareMultiSearch() {
+ return delegate.prepareMultiSearch();
+ }
+
+ @Override
+ public ActionFuture<SearchResponse> moreLikeThis(MoreLikeThisRequest request) {
+ return delegate.moreLikeThis(request);
+ }
+
+ @Override
+ public void moreLikeThis(MoreLikeThisRequest request, ActionListener<SearchResponse> listener) {
+ delegate.moreLikeThis(request, listener);
+ }
+
+ @Override
+ public MoreLikeThisRequestBuilder prepareMoreLikeThis(String index, String type, String id) {
+ return delegate.prepareMoreLikeThis(index, type, id);
+ }
+
+ @Override
+ public ActionFuture<TermVectorResponse> termVector(TermVectorRequest request) {
+ return delegate.termVector(request);
+ }
+
+ @Override
+ public void termVector(TermVectorRequest request, ActionListener<TermVectorResponse> listener) {
+ delegate.termVector(request, listener);
+ }
+
+ @Override
+ public TermVectorRequestBuilder prepareTermVector(String index, String type, String id) {
+ return delegate.prepareTermVector(index, type, id);
+ }
+
+ @Override
+ public ActionFuture<MultiTermVectorsResponse> multiTermVectors(MultiTermVectorsRequest request) {
+ return delegate.multiTermVectors(request);
+ }
+
+ @Override
+ public void multiTermVectors(MultiTermVectorsRequest request, ActionListener<MultiTermVectorsResponse> listener) {
+ delegate.multiTermVectors(request, listener);
+ }
+
+ @Override
+ public MultiTermVectorsRequestBuilder prepareMultiTermVectors() {
+ return delegate.prepareMultiTermVectors();
+ }
+
+ @Override
+ public ActionFuture<PercolateResponse> percolate(PercolateRequest request) {
+ return delegate.percolate(request);
+ }
+
+ @Override
+ public void percolate(PercolateRequest request, ActionListener<PercolateResponse> listener) {
+ delegate.percolate(request, listener);
+ }
+
+ @Override
+ public PercolateRequestBuilder preparePercolate() {
+ return delegate.preparePercolate();
+ }
+
+ @Override
+ public ActionFuture<MultiPercolateResponse> multiPercolate(MultiPercolateRequest request) {
+ return delegate.multiPercolate(request);
+ }
+
+ @Override
+ public void multiPercolate(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener) {
+ delegate.multiPercolate(request, listener);
+ }
+
+ @Override
+ public MultiPercolateRequestBuilder prepareMultiPercolate() {
+ return delegate.prepareMultiPercolate();
+ }
+
+ @Override
+ public ExplainRequestBuilder prepareExplain(String index, String type, String id) {
+ return delegate.prepareExplain(index, type, id);
+ }
+
+ @Override
+ public ActionFuture<ExplainResponse> explain(ExplainRequest request) {
+ return delegate.explain(request);
+ }
+
+ @Override
+ public void explain(ExplainRequest request, ActionListener<ExplainResponse> listener) {
+ delegate.explain(request, listener);
+ }
+
+ @Override
+ public ClearScrollRequestBuilder prepareClearScroll() {
+ return delegate.prepareClearScroll();
+ }
+
+ @Override
+ public ActionFuture<ClearScrollResponse> clearScroll(ClearScrollRequest request) {
+ return delegate.clearScroll(request);
+ }
+
+ @Override
+ public void clearScroll(ClearScrollRequest request, ActionListener<ClearScrollResponse> listener) {
+ delegate.clearScroll(request, listener);
+ }
+
+ @Override
+ public ThreadPool threadPool() {
+ return delegate.threadPool();
+ }
+
+ @Override
+ public Settings settings() {
+ return delegate.settings();
+ }
+
+ @Override
+ public String toString() {
+ return "randomized(" + super.toString() + ")";
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/engine/MockEngineModule.java b/src/test/java/org/elasticsearch/test/engine/MockEngineModule.java
new file mode 100644
index 0000000..d8f4c4a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/engine/MockEngineModule.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.engine;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.engine.Engine;
+
+public class MockEngineModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
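+ // bind Engine to MockInternalEngine, which tracks in-flight searchers and can
+ // randomly wrap index readers for additional checks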
+ bind(Engine.class).to(MockInternalEngine.class).asEagerSingleton();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java b/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java
new file mode 100644
index 0000000..f3a84e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.AssertingIndexSearcher;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.XSearcherManager;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.engine.internal.InternalEngine;
+import org.elasticsearch.index.indexing.ShardIndexingService;
+import org.elasticsearch.index.merge.policy.MergePolicyProvider;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.indices.warmer.IndicesWarmer;
+import org.elasticsearch.test.TestCluster;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.lang.reflect.Constructor;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+public final class MockInternalEngine extends InternalEngine implements Engine {
+ public static final ConcurrentMap<AssertingSearcher, RuntimeException> INFLIGHT_ENGINE_SEARCHERS = new ConcurrentHashMap<AssertingSearcher, RuntimeException>();
+ public static final String WRAP_READER_RATIO = "index.engine.mock.random.wrap_reader_ratio";
+ public static final String READER_WRAPPER_TYPE = "index.engine.mock.random.wrapper";
+
+ private final Random random;
+ private final boolean wrapReader;
+ private final Class<? extends FilterDirectoryReader> wrapper;
+
+ @Inject
+ public MockInternalEngine(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool,
+ IndexSettingsService indexSettingsService, ShardIndexingService indexingService, @Nullable IndicesWarmer warmer, Store store,
+ SnapshotDeletionPolicy deletionPolicy, Translog translog, MergePolicyProvider mergePolicyProvider,
+ MergeSchedulerProvider mergeScheduler, AnalysisService analysisService, SimilarityService similarityService,
+ CodecService codecService) throws EngineException {
+ super(shardId, indexSettings, threadPool, indexSettingsService, indexingService, warmer, store,
+ deletionPolicy, translog, mergePolicyProvider, mergeScheduler, analysisService, similarityService, codecService);
+ final long seed = indexSettings.getAsLong(TestCluster.SETTING_INDEX_SEED, 0l);
+ random = new Random(seed);
+ final double ratio = indexSettings.getAsDouble(WRAP_READER_RATIO, 0.0d); // disabled by default - AssertingDirectoryReader is very slow
+ wrapper = indexSettings.getAsClass(READER_WRAPPER_TYPE, AssertingDirectoryReader.class);
+ wrapReader = random.nextDouble() < ratio;
+ if (logger.isTraceEnabled()) {
+ logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]", this.getClass().getName(), shardId, seed, wrapReader);
+ }
+ }
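+
+ // To enable reader wrapping in a test, set the ratio above 0.0 in the index
+ // settings (sketch; the value is illustrative):
+ //
+ //   ImmutableSettings.settingsBuilder().put(MockInternalEngine.WRAP_READER_RATIO, 1.0d).build();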
+
+
+ public void close() throws ElasticsearchException {
+ try {
+ super.close();
+ } finally {
+ if (logger.isTraceEnabled()) {
+ // trace-log each searcher that is still in flight at close time
+ for (Entry<MockInternalEngine.AssertingSearcher, RuntimeException> entry : MockInternalEngine.INFLIGHT_ENGINE_SEARCHERS.entrySet()) {
+ logger.trace("Unreleased searcher instance for shard [{}]", entry.getValue(), entry.getKey().shardId);
+ }
+ }
+ }
+ }
+
+ @Override
+ protected Searcher newSearcher(String source, IndexSearcher searcher, XSearcherManager manager) throws EngineException {
+
+ IndexReader reader = searcher.getIndexReader();
+ IndexReader wrappedReader = reader;
+ if (reader instanceof DirectoryReader && wrapReader) {
+ wrappedReader = wrapReader((DirectoryReader) reader);
+ }
+ // this executes basic query checks and asserts that weights are normalized only once etc.
+ final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(random, wrappedReader);
+ assertingIndexSearcher.setSimilarity(searcher.getSimilarity());
+ // pass the original searcher to the super.newSearcher() method to make sure it is the searcher that will
+ // be released later on. If we wrap an index reader here, we must not pass the wrapped version to the manager
+ // on release; otherwise the reader is closed too early. The good news: things fail all over the place if we get this wrong.
+ return new AssertingSearcher(assertingIndexSearcher, super.newSearcher(source, searcher, manager), shardId);
+ }
+
+ private DirectoryReader wrapReader(DirectoryReader reader) {
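+ // pick a wrapper constructor by reflection: prefer (DirectoryReader, Settings) so the
+ // wrapper can configure itself from the index settings; otherwise fall back to the
+ // plain (DirectoryReader) constructor, and if neither exists return the reader as-is.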
+ try {
+ Constructor<?>[] constructors = wrapper.getConstructors();
+ Constructor<?> nonRandom = null;
+ for (Constructor<?> constructor : constructors) {
+ Class<?>[] parameterTypes = constructor.getParameterTypes();
+ if (parameterTypes.length > 0 && parameterTypes[0] == DirectoryReader.class) {
+ if (parameterTypes.length == 1) {
+ nonRandom = constructor;
+ } else if (parameterTypes.length == 2 && parameterTypes[1] == Settings.class) {
+
+ return (DirectoryReader) constructor.newInstance(reader, indexSettings);
+ }
+ }
+ }
+ if (nonRandom != null) {
+ return (DirectoryReader) nonRandom.newInstance(reader);
+ }
+ } catch (Exception e) {
+ throw new ElasticsearchException("Can not wrap reader", e);
+ }
+ return reader;
+ }
+
+ public final class AssertingSearcher implements Searcher {
+ private final Searcher wrappedSearcher;
+ private final ShardId shardId;
+ private final IndexSearcher indexSearcher;
+ private RuntimeException firstReleaseStack;
+ private final Object lock = new Object();
+ private final int initialRefCount;
+
+ public AssertingSearcher(IndexSearcher indexSearcher, Searcher wrappedSearcher, ShardId shardId) {
+ // we only use the given index searcher here instead of the wrapped searcher's IndexSearcher;
+ // the latter might be a wrapped searcher with a wrapped reader.
+ this.wrappedSearcher = wrappedSearcher;
+ this.shardId = shardId;
+ initialRefCount = wrappedSearcher.reader().getRefCount();
+ this.indexSearcher = indexSearcher;
+ assert initialRefCount > 0 : "IndexReader#getRefCount() was [" + initialRefCount + "] expected a value > [0] - reader is already closed";
+ INFLIGHT_ENGINE_SEARCHERS.put(this, new RuntimeException("Unreleased Searcher, source [" + wrappedSearcher.source() + "]"));
+ }
+
+ @Override
+ public String source() {
+ return wrappedSearcher.source();
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ RuntimeException remove = INFLIGHT_ENGINE_SEARCHERS.remove(this);
+ synchronized (lock) {
+ // make sure we only get this once and store the stack of the first caller!
+ if (remove == null) {
+ assert firstReleaseStack != null;
+ AssertionError error = new AssertionError("Released Searcher more than once, source [" + wrappedSearcher.source() + "]");
+ error.initCause(firstReleaseStack);
+ throw error;
+ } else {
+ assert firstReleaseStack == null;
+ firstReleaseStack = new RuntimeException("Searcher Released first here, source [" + wrappedSearcher.source() + "]");
+ }
+ }
+ final int refCount = wrappedSearcher.reader().getRefCount();
+ // this assert may seem paranoid, but given LUCENE-5362 we'd better assert here to make sure we
+ // catch any potential problems early.
+ assert refCount > 0 : "IndexReader#getRefCount() was [" + refCount + "] expected a value > [0] - reader is already closed. Initial refCount was: [" + initialRefCount + "]";
+ try {
+ return wrappedSearcher.release();
+ } catch (RuntimeException ex) {
+ logger.debug("Failed to release searcher", ex);
+ throw ex;
+ }
+ }
+
+ @Override
+ public IndexReader reader() {
+ return indexSearcher.getIndexReader();
+ }
+
+ @Override
+ public IndexSearcher searcher() {
+ return indexSearcher;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+ }
+
+ public static abstract class DirectoryReaderWrapper extends FilterDirectoryReader {
+ protected final SubReaderWrapper subReaderWrapper;
+
+ public DirectoryReaderWrapper(DirectoryReader in, SubReaderWrapper subReaderWrapper) {
+ super(in, subReaderWrapper);
+ this.subReaderWrapper = subReaderWrapper;
+ }
+
+ @Override
+ public Object getCoreCacheKey() {
+ return in.getCoreCacheKey();
+ }
+
+ @Override
+ public Object getCombinedCoreAndDeletesKey() {
+ return in.getCombinedCoreAndDeletesKey();
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/engine/ThrowingAtomicReaderWrapper.java b/src/test/java/org/elasticsearch/test/engine/ThrowingAtomicReaderWrapper.java
new file mode 100644
index 0000000..cedb9de
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/engine/ThrowingAtomicReaderWrapper.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
+
+import java.io.IOException;
+
+/**
+ * A FilterAtomicReader that allows exceptions to be thrown when certain methods
+ * are called on it. This makes it possible to test parts of the system under
+ * error conditions that would otherwise be hard to reproduce.
+ */
+public class ThrowingAtomicReaderWrapper extends FilterAtomicReader {
+
+ private final Thrower thrower;
+
+ /**
+ * Flags passed to {@link Thrower#maybeThrow(org.elasticsearch.test.engine.ThrowingAtomicReaderWrapper.Flags)}
+ * when the corresponding method is called.
+ */
+ public enum Flags {
+ TermVectors,
+ Terms,
+ TermsEnum,
+ Intersect,
+ DocsEnum,
+ DocsAndPositionsEnum,
+ Fields,
+ Norms, NumericDocValues, BinaryDocValues, SortedDocValues, SortedSetDocValues;
+ }
+
+ /**
+ * A callback interface that allows certain exceptions to be thrown for
+ * methods called on the IndexReader wrapped by {@link ThrowingAtomicReaderWrapper}
+ */
+ public static interface Thrower {
+ /**
+ * Maybe throws an exception for the given flag ;)
+ */
+ public void maybeThrow(Flags flag) throws IOException;
+
+ /**
+ * If this method returns <code>true</code>, the {@link Terms} instance for the given field
+ * is wrapped with Thrower support; otherwise no exception will be thrown for
+ * the current {@link Terms} instance or any other instance obtained from it.
+ */
+ public boolean wrapTerms(String field);
+ }
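+
+ /**
+ * A minimal example implementation (illustrative, not part of the original API): it never
+ * throws and wraps every field, so the wrapped reader behaves like a plain FilterAtomicReader.
+ * Tests can subclass it and throw from maybeThrow for selected flags.
+ */
+ public static class NoOpThrower implements Thrower {
+ @Override
+ public void maybeThrow(Flags flag) throws IOException {
+ // never throws; override to inject failures for specific flags
+ }
+
+ @Override
+ public boolean wrapTerms(String field) {
+ return true; // wrap all fields so maybeThrow is consulted everywhere
+ }
+ }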
+
+ public ThrowingAtomicReaderWrapper(AtomicReader in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+ }
+
+
+ @Override
+ public Fields fields() throws IOException {
+ Fields fields = super.fields();
+ thrower.maybeThrow(Flags.Fields);
+ return fields == null ? null : new ThrowingFields(fields, thrower);
+ }
+
+ @Override
+ public Fields getTermVectors(int docID) throws IOException {
+ Fields fields = super.getTermVectors(docID);
+ thrower.maybeThrow(Flags.TermVectors);
+ return fields == null ? null : new ThrowingFields(fields, thrower);
+ }
+
+ /**
+ * Wraps a {@link Fields} instance so that the {@link Thrower} can inject exceptions
+ */
+ public static class ThrowingFields extends FilterFields {
+ private final Thrower thrower;
+
+ public ThrowingFields(Fields in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+ }
+
+ @Override
+ public Terms terms(String field) throws IOException {
+ Terms terms = super.terms(field);
+ if (thrower.wrapTerms(field)) {
+ thrower.maybeThrow(Flags.Terms);
+ return terms == null ? null : new ThrowingTerms(terms, thrower);
+ }
+ return terms;
+ }
+ }
+
+ /**
+ * Wraps a {@link Terms} instance so that the {@link Thrower} can inject exceptions
+ */
+ public static class ThrowingTerms extends FilterTerms {
+ private final Thrower thrower;
+
+ public ThrowingTerms(Terms in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+ }
+
+ @Override
+ public TermsEnum intersect(CompiledAutomaton automaton, BytesRef bytes) throws IOException {
+ TermsEnum termsEnum = in.intersect(automaton, bytes);
+ thrower.maybeThrow(Flags.Intersect);
+ return new ThrowingTermsEnum(termsEnum, thrower);
+ }
+
+ @Override
+ public TermsEnum iterator(TermsEnum reuse) throws IOException {
+ TermsEnum termsEnum = super.iterator(reuse);
+ thrower.maybeThrow(Flags.TermsEnum);
+ return new ThrowingTermsEnum(termsEnum, thrower);
+ }
+ }
+
+ static class ThrowingTermsEnum extends FilterTermsEnum {
+ private final Thrower thrower;
+
+ public ThrowingTermsEnum(TermsEnum in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+
+ }
+
+ @Override
+ public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ thrower.maybeThrow(Flags.DocsEnum);
+ return super.docs(liveDocs, reuse, flags);
+ }
+
+ @Override
+ public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+ thrower.maybeThrow(Flags.DocsAndPositionsEnum);
+ return super.docsAndPositions(liveDocs, reuse, flags);
+ }
+ }
+
+
+ @Override
+ public NumericDocValues getNumericDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.NumericDocValues);
+ return super.getNumericDocValues(field);
+
+ }
+
+ @Override
+ public BinaryDocValues getBinaryDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.BinaryDocValues);
+ return super.getBinaryDocValues(field);
+ }
+
+ @Override
+ public SortedDocValues getSortedDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.SortedDocValues);
+ return super.getSortedDocValues(field);
+ }
+
+ @Override
+ public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.SortedSetDocValues);
+ return super.getSortedSetDocValues(field);
+ }
+
+ @Override
+ public NumericDocValues getNormValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.Norms);
+ return super.getNormValues(field);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java b/src/test/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java
new file mode 100644
index 0000000..b21e94d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.hamcrest.Matcher;
+
+/**
+ * Assertions for easier handling of our custom collections,
+ * for example ImmutableOpenMap
+ */
+public class CollectionAssertions {
+
+ public static Matcher<ImmutableOpenMap> hasKey(final String key) {
+ return new CollectionMatchers.ImmutableOpenMapHasKeyMatcher(key);
+ }
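+
+ // Example (illustrative; `indices` stands for any ImmutableOpenMap, such as the
+ // index map of a cluster state's MetaData):
+ //
+ //   assertThat(indices, hasKey("test"));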
+}
diff --git a/src/test/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java b/src/test/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java
new file mode 100644
index 0000000..521ba58
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+
+/**
+ * Matchers for easier handling of our custom collections,
+ * for example ImmutableOpenMap
+ */
+public class CollectionMatchers {
+
+ public static class ImmutableOpenMapHasKeyMatcher extends TypeSafeMatcher<ImmutableOpenMap> {
+
+ private final String key;
+
+ public ImmutableOpenMapHasKeyMatcher(String key) {
+ this.key = key;
+ }
+
+ @Override
+ protected boolean matchesSafely(ImmutableOpenMap item) {
+ return item.containsKey(key);
+ }
+
+ @Override
+ public void describeMismatchSafely(final ImmutableOpenMap map, final Description mismatchDescription) {
+ if (map.size() == 0) {
+ mismatchDescription.appendText("was empty");
+ } else {
+ mismatchDescription.appendText(" was ").appendValue(map);
+ }
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("ImmutableOpenMap should contain key " + key);
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
new file mode 100644
index 0000000..7849d0f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
@@ -0,0 +1,449 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.test.engine.MockInternalEngine;
+import org.elasticsearch.test.store.MockDirectoryHelper;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.test.ElasticsearchTestCase.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Assertion utilities for Elasticsearch responses, search hits and other constructs used in tests.
+ */
+public class ElasticsearchAssertions {
+
+ public static void assertAcked(AcknowledgedRequestBuilder<?, ?, ?> builder) {
+ assertAcked(builder.get());
+ }
+
+ public static void assertAcked(AcknowledgedResponse response) {
+ assertThat(response.getClass().getSimpleName() + " failed - not acked", response.isAcknowledged(), equalTo(true));
+ assertVersionSerializable(response);
+ }
+
+ public static void assertAcked(DeleteIndexRequestBuilder builder) {
+ assertAcked(builder.get());
+ }
+
+ public static void assertAcked(DeleteIndexResponse response) {
+ assertThat("Delete Index failed - not acked", response.isAcknowledged(), equalTo(true));
+ assertVersionSerializable(response);
+ }
+
+ public static String formatShardStatus(BroadcastOperationResponse response) {
+ String msg = " Total shards: " + response.getTotalShards() + " Successful shards: " + response.getSuccessfulShards() + " & "
+ + response.getFailedShards() + " shard failures:";
+ for (ShardOperationFailedException failure : response.getShardFailures()) {
+ msg += "\n " + failure.toString();
+ }
+ return msg;
+ }
+
+ public static String formatShardStatus(SearchResponse response) {
+ String msg = " Total shards: " + response.getTotalShards() + " Successful shards: " + response.getSuccessfulShards() + " & "
+ + response.getFailedShards() + " shard failures:";
+ for (ShardSearchFailure failure : response.getShardFailures()) {
+ msg += "\n " + failure.toString();
+ }
+ return msg;
+ }
+
+ /*
+ * assertions
+ */
+ public static void assertHitCount(SearchResponse searchResponse, long expectedHitCount) {
+ if (searchResponse.getHits().totalHits() != expectedHitCount) {
+ fail("Hit count is " + searchResponse.getHits().totalHits() + " but " + expectedHitCount + " was expected. "
+ + formatShardStatus(searchResponse));
+ }
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertSearchHits(SearchResponse searchResponse, String... ids) {
+ String shardStatus = formatShardStatus(searchResponse);
+ assertThat("Expected different hit count. " + shardStatus, searchResponse.getHits().hits().length, equalTo(ids.length));
+
+ Set<String> idsSet = new HashSet<String>(Arrays.asList(ids));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat("Expected id: " + hit.getId() + " in the result but wasn't." + shardStatus, idsSet.remove(hit.getId()),
+ equalTo(true));
+ }
+ assertThat("Expected ids: " + Arrays.toString(idsSet.toArray(new String[idsSet.size()])) + " in the result - result size differs."
+ + shardStatus, idsSet.size(), equalTo(0));
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertOrderedSearchHits(SearchResponse searchResponse, String... ids) {
+ String shardStatus = formatShardStatus(searchResponse);
+ assertThat("Expected different hit count. " + shardStatus, searchResponse.getHits().hits().length, equalTo(ids.length));
+ for (int i = 0; i < ids.length; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat("Expected id: " + ids[i] + " at position " + i + " but wasn't." + shardStatus, hit.getId(), equalTo(ids[i]));
+ }
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertHitCount(CountResponse countResponse, long expectedHitCount) {
+ if (countResponse.getCount() != expectedHitCount) {
+ fail("Count is " + countResponse.getCount() + " but " + expectedHitCount + " was expected. " + formatShardStatus(countResponse));
+ }
+ assertVersionSerializable(countResponse);
+ }
+
+ public static void assertMatchCount(PercolateResponse percolateResponse, long expectedHitCount) {
+ if (percolateResponse.getCount() != expectedHitCount) {
+ fail("Count is " + percolateResponse.getCount() + " but " + expectedHitCount + " was expected. " + formatShardStatus(percolateResponse));
+ }
+ assertVersionSerializable(percolateResponse);
+ }
+
+ public static void assertExists(GetResponse response) {
+ String message = String.format(Locale.ROOT, "Expected %s/%s/%s to exist, but does not", response.getIndex(), response.getType(), response.getId());
+ assertThat(message, response.isExists(), is(true));
+ }
+
+ public static void assertFirstHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+ assertSearchHit(searchResponse, 1, matcher);
+ }
+
+ public static void assertSecondHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+ assertSearchHit(searchResponse, 2, matcher);
+ }
+
+ public static void assertThirdHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+ assertSearchHit(searchResponse, 3, matcher);
+ }
+
+ public static void assertSearchHit(SearchResponse searchResponse, int number, Matcher<SearchHit> matcher) {
+ assertThat("SearchHit number must be greater than 0", number, greaterThan(0));
+ assertThat(searchResponse.getHits().totalHits(), greaterThanOrEqualTo((long) number));
+ assertSearchHit(searchResponse.getHits().getAt(number - 1), matcher);
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertNoFailures(SearchResponse searchResponse) {
+ assertThat("Unexpected ShardFailures: " + Arrays.toString(searchResponse.getShardFailures()),
+ searchResponse.getShardFailures().length, equalTo(0));
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertFailures(SearchResponse searchResponse) {
+ assertThat("Expected at least one shard failure, got none",
+ searchResponse.getShardFailures().length, greaterThan(0));
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertNoFailures(BroadcastOperationResponse response) {
+ assertThat("Unexpected ShardFailures: " + Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0));
+ assertVersionSerializable(response);
+ }
+
+ public static void assertAllSuccessful(BroadcastOperationResponse response) {
+ assertNoFailures(response);
+ assertThat("Expected all shards successful but got successful [" + response.getSuccessfulShards() + "] total [" + response.getTotalShards() + "]",
+ response.getTotalShards(), equalTo(response.getSuccessfulShards()));
+ assertVersionSerializable(response);
+ }
+
+ public static void assertSearchHit(SearchHit searchHit, Matcher<SearchHit> matcher) {
+ assertThat(searchHit, matcher);
+ assertVersionSerializable(searchHit);
+ }
+
+ public static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, Matcher<String> matcher) {
+ assertHighlight(resp, hit, field, fragment, greaterThan(fragment), matcher);
+ }
+
+ public static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, int totalFragments, Matcher<String> matcher) {
+ assertHighlight(resp, hit, field, fragment, equalTo(totalFragments), matcher);
+ }
+
+ public static void assertHighlight(SearchHit hit, String field, int fragment, Matcher<String> matcher) {
+ assertHighlight(hit, field, fragment, greaterThan(fragment), matcher);
+ }
+
+ public static void assertHighlight(SearchHit hit, String field, int fragment, int totalFragments, Matcher<String> matcher) {
+ assertHighlight(hit, field, fragment, equalTo(totalFragments), matcher);
+ }
+
+ private static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, Matcher<Integer> fragmentsMatcher, Matcher<String> matcher) {
+ assertNoFailures(resp);
+ assertThat("not enough hits", resp.getHits().hits().length, greaterThan(hit));
+ assertHighlight(resp.getHits().hits()[hit], field, fragment, fragmentsMatcher, matcher);
+ assertVersionSerializable(resp);
+ }
+
+ private static void assertHighlight(SearchHit hit, String field, int fragment, Matcher<Integer> fragmentsMatcher, Matcher<String> matcher) {
+ assertThat(hit.getHighlightFields(), hasKey(field));
+ assertThat(hit.getHighlightFields().get(field).fragments().length, fragmentsMatcher);
+ assertThat(hit.highlightFields().get(field).fragments()[fragment].string(), matcher);
+ }
+
+ public static void assertNotHighlighted(SearchResponse resp, int hit, String field) {
+ assertNoFailures(resp);
+ assertThat("not enough hits", resp.getHits().hits().length, greaterThan(hit));
+ assertThat(resp.getHits().hits()[hit].getHighlightFields(), not(hasKey(field)));
+ }
+
+ public static void assertSuggestionSize(Suggest searchSuggest, int entry, int size, String key) {
+ assertThat(searchSuggest, notNullValue());
+ assertThat(searchSuggest.size(), greaterThanOrEqualTo(1));
+ assertThat(searchSuggest.getSuggestion(key).getName(), equalTo(key));
+ assertThat(searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry));
+ assertThat(searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), equalTo(size));
+ assertVersionSerializable(searchSuggest);
+ }
+
+ public static void assertSuggestion(Suggest searchSuggest, int entry, int ord, String key, String text) {
+ assertThat(searchSuggest, notNullValue());
+ assertThat(searchSuggest.size(), greaterThanOrEqualTo(1));
+ assertThat(searchSuggest.getSuggestion(key).getName(), equalTo(key));
+ assertThat(searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry));
+ assertThat(searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), greaterThan(ord));
+ assertThat(searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().get(ord).getText().string(), equalTo(text));
+ assertVersionSerializable(searchSuggest);
+ }
+
+ /**
+ * Asserts that the suggestion's options are exactly the provided text.
+ */
+ public static void assertSuggestion(Suggest searchSuggest, int entry, String key, String... text) {
+ assertSuggestion(searchSuggest, entry, key, text.length, text);
+ }
+
+ /**
+ * Asserts that the suggestion returns <code>size</code> options and that the first
+ * options match the provided text.
+ */
+ public static void assertSuggestion(Suggest searchSuggest, int entry, String key, int size, String... text) {
+ assertSuggestionSize(searchSuggest, entry, size, key);
+ for (int i = 0; i < text.length; i++) {
+ assertSuggestion(searchSuggest, entry, i, key, text[i]);
+ }
+ }
+
+ /*
+ * matchers
+ */
+ public static Matcher<SearchHit> hasId(final String id) {
+ return new ElasticsearchMatchers.SearchHitHasIdMatcher(id);
+ }
+
+ public static Matcher<SearchHit> hasType(final String type) {
+ return new ElasticsearchMatchers.SearchHitHasTypeMatcher(type);
+ }
+
+ public static Matcher<SearchHit> hasIndex(final String index) {
+ return new ElasticsearchMatchers.SearchHitHasIndexMatcher(index);
+ }
+
+ public static Matcher<SearchHit> hasScore(final float score) {
+ return new ElasticsearchMatchers.SearchHitHasScoreMatcher(score);
+ }
+
+ public static <T extends Query> T assertBooleanSubQuery(Query query, Class<T> subqueryType, int i) {
+ assertThat(query, instanceOf(BooleanQuery.class));
+ BooleanQuery q = (BooleanQuery) query;
+ assertThat(q.getClauses().length, greaterThan(i));
+ assertThat(q.getClauses()[i].getQuery(), instanceOf(subqueryType));
+ return (T) q.getClauses()[i].getQuery();
+ }
+
+ public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, Class<E> exceptionClass) {
+ assertThrows(builder.execute(), exceptionClass);
+ }
+
+ public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, Class<E> exceptionClass, String extraInfo) {
+ assertThrows(builder.execute(), exceptionClass, extraInfo);
+ }
+
+ public static <E extends Throwable> void assertThrows(ActionFuture future, Class<E> exceptionClass) {
+ assertThrows(future, exceptionClass, null);
+ }
+
+ public static <E extends Throwable> void assertThrows(ActionFuture future, Class<E> exceptionClass, String extraInfo) {
+ boolean fail = false;
+ extraInfo = extraInfo == null || extraInfo.isEmpty() ? "" : extraInfo + ": ";
+ extraInfo += "expected a " + exceptionClass + " exception to be thrown";
+
+ try {
+ future.actionGet();
+ fail = true;
+
+ } catch (ElasticsearchException esException) {
+ assertThat(extraInfo, esException.unwrapCause(), instanceOf(exceptionClass));
+ } catch (Throwable e) {
+ assertThat(extraInfo, e, instanceOf(exceptionClass));
+ }
+ // has to be outside catch clause to get a proper message
+ if (fail) {
+ throw new AssertionError(extraInfo);
+ }
+ }
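+
+ // Example (illustrative): assert that searching a missing index fails with the
+ // expected exception type.
+ //
+ //   assertThrows(client.prepareSearch("no_such_index"), IndexMissingException.class);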
+
+ private static BytesReference serialize(Version version, Streamable streamable) throws IOException {
+ BytesStreamOutput output = new BytesStreamOutput();
+ output.setVersion(version);
+ streamable.writeTo(output);
+ output.flush();
+ return output.bytes();
+ }
+
+ public static void assertVersionSerializable(Streamable streamable) {
+ assertTrue(Version.CURRENT.after(getPreviousVersion()));
+ assertVersionSerializable(randomVersion(), streamable);
+ }
+
+ public static void assertVersionSerializable(Version version, Streamable streamable) {
+ try {
+ Streamable newInstance = tryCreateNewInstance(streamable);
+ if (newInstance == null) {
+ return; // can't create a new instance - we never modify a
+ // streamable that comes in.
+ }
+ if (streamable instanceof ActionRequest) {
+ ((ActionRequest<?>)streamable).validate();
+ }
+ BytesReference orig = serialize(version, streamable);
+ StreamInput input = new BytesStreamInput(orig);
+ input.setVersion(version);
+ newInstance.readFrom(input);
+ assertThat("Stream should be fully read with version [" + version + "] for streamable [" + streamable + "]", input.available(), equalTo(0));
+ assertThat("Serialization failed with version [" + version + "] bytes should be equal for streamable [" + streamable + "]", serialize(version, streamable), equalTo(orig));
+ } catch (Throwable ex) {
+ throw new RuntimeException("failed to check serialization - version [" + version + "] for streamable [" + streamable + "]", ex);
+ }
+
+ }
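+
+ // Usage sketch (illustrative; `searchResponse` stands for any Streamable with a
+ // no-arg constructor): round-trip at a random wire version and require that
+ // re-serializing yields byte-for-byte identical output.
+ //
+ //   assertVersionSerializable(randomVersion(), searchResponse);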
+
+ private static Streamable tryCreateNewInstance(Streamable streamable) throws NoSuchMethodException, InstantiationException,
+ IllegalAccessException, InvocationTargetException {
+ try {
+ Class<? extends Streamable> clazz = streamable.getClass();
+ Constructor<? extends Streamable> constructor = clazz.getDeclaredConstructor();
+ assertThat(constructor, Matchers.notNullValue());
+ constructor.setAccessible(true);
+ Streamable newInstance = constructor.newInstance();
+ return newInstance;
+ } catch (Throwable e) {
+ return null;
+ }
+ }
+
+ /**
+ * Applies basic assertions on the SearchResponse. This method checks if all shards were successful, if
+ * any of the shards threw an exception and if the response is serializable.
+ */
+ public static SearchResponse assertSearchResponse(SearchRequestBuilder request) {
+ return assertSearchResponse(request.get());
+ }
+
+ /**
+ * Applies basic assertions on the SearchResponse. This method checks if all shards were successful, if
+ * any of the shards threw an exception and if the response is serializable.
+ */
+ public static SearchResponse assertSearchResponse(SearchResponse response) {
+ assertNoFailures(response);
+ assertThat("One or more shards were not successful but didn't trigger a failure", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ return response;
+ }
+
+ public static void assertAllSearchersClosed() {
+ /* in some cases we finish a test faster than the freeContext calls make it to the
+ * shards. Let's wait for some time if there are still searchers. If they are really
+ * pending, we will fail anyway. */
+ try {
+ if (awaitBusy(new Predicate<Object>() {
+ public boolean apply(Object o) {
+ return MockInternalEngine.INFLIGHT_ENGINE_SEARCHERS.isEmpty();
+ }
+ }, 5, TimeUnit.SECONDS)) {
+ return;
+ }
+ } catch (InterruptedException ex) {
+ if (MockInternalEngine.INFLIGHT_ENGINE_SEARCHERS.isEmpty()) {
+ return;
+ }
+ }
+ try {
+ RuntimeException ex = null;
+ StringBuilder builder = new StringBuilder("Unclosed Searchers instance for shards: [");
+ for (Map.Entry<MockInternalEngine.AssertingSearcher, RuntimeException> entry : MockInternalEngine.INFLIGHT_ENGINE_SEARCHERS.entrySet()) {
+ ex = entry.getValue();
+ builder.append(entry.getKey().shardId()).append(",");
+ }
+ builder.append("]");
+ throw new RuntimeException(builder.toString(), ex);
+ } finally {
+ MockInternalEngine.INFLIGHT_ENGINE_SEARCHERS.clear();
+ }
+ }
+
+ public static void assertAllFilesClosed() throws IOException {
+ try {
+ for (MockDirectoryHelper.ElasticsearchMockDirectoryWrapper w : MockDirectoryHelper.wrappers) {
+ if (w.isOpen()) {
+ w.closeWithRuntimeException();
+ }
+ }
+ } finally {
+ MockDirectoryHelper.wrappers.clear();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java
new file mode 100644
index 0000000..fe6a06b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.hamcrest;
+
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.jts.JtsGeometry;
+import com.spatial4j.core.shape.jts.JtsPoint;
+import com.vividsolutions.jts.geom.*;
+import org.junit.Assert;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+public class ElasticsearchGeoAssertions {
+
+ private static int top(Coordinate...points) {
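+ // pick the index of the lowest point (smallest y, ties broken towards the smaller x) as a canonical ring start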
+ int top = 0;
+ for (int i = 1; i < points.length; i++) {
+ if(points[i].y < points[top].y) {
+ top = i;
+ } else if(points[i].y == points[top].y) {
+ if(points[i].x <= points[top].x) {
+ top = i;
+ }
+ }
+ }
+ return top;
+ }
+
+ private static int prev(int top, Coordinate...points) {
+ for (int i = 1; i < points.length; i++) {
+ int p = (top + points.length - i) % points.length;
+ if((points[p].x != points[top].x) || (points[p].y != points[top].y)) {
+ return p;
+ }
+ }
+ return -1;
+ }
+
+ private static int next(int top, Coordinate...points) {
+ for (int i = 1; i < points.length; i++) {
+ int n = (top + i) % points.length;
+ if((points[n].x != points[top].x) || (points[n].y != points[top].y)) {
+ return n;
+ }
+ }
+ return -1;
+ }
+
+ private static Coordinate[] fixedOrderedRing(List<Coordinate> coordinates, boolean direction) {
+ return fixedOrderedRing(coordinates.toArray(new Coordinate[coordinates.size()]), direction);
+ }
+
+ private static Coordinate[] fixedOrderedRing(Coordinate[] points, boolean direction) {
+
+ final int top = top(points);
+ final int next = next(top, points);
+ final int prev = prev(top, points);
+ final boolean orientation = points[next].x < points[prev].x;
+
+ if(orientation != direction) {
+ List<Coordinate> asList = Arrays.asList(points);
+ Collections.reverse(asList);
+ return fixedOrderedRing(asList, direction);
+ } else {
+ if(top>0) {
+ Coordinate[] aligned = new Coordinate[points.length];
+ System.arraycopy(points, top, aligned, 0, points.length-top-1);
+ System.arraycopy(points, 0, aligned, points.length-top-1, top);
+ aligned[aligned.length-1] = aligned[0];
+ return aligned;
+ } else {
+ return points;
+ }
+ }
+
+ }
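+
+ /* Worked example (coordinates are illustrative): the ring
+ *   (1,1) (2,0) (1,-1) (0,0) (1,1)
+ * and the same ring reversed, or rotated to start at another corner, all normalize
+ * to a ring starting at the lowest point, here (1,-1), winding in the requested
+ * direction, so two logically equal rings can be compared element by element.
+ */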
+
+ public static void assertEquals(Coordinate c1, Coordinate c2) {
+ assertTrue("expected coordinate " + c1 + " but found " + c2, c1.x == c2.x && c1.y == c2.y);
+ }
+
+ private static boolean isRing(Coordinate[] c) {
+ return (c[0].x == c[c.length-1].x) && (c[0].y == c[c.length-1].y);
+ }
+
+ public static void assertEquals(Coordinate[] c1, Coordinate[] c2) {
+ Assert.assertEquals(c1.length, c2.length);
+
+ if(isRing(c1) && isRing(c2)) {
+ c1 = fixedOrderedRing(c1, true);
+ c2 = fixedOrderedRing(c2, true);
+ }
+
+ for (int i = 0; i < c2.length; i++) {
+ assertEquals(c1[i], c2[i]);
+ }
+ }
+
+ public static void assertEquals(LineString l1, LineString l2) {
+ assertEquals(l1.getCoordinates(), l2.getCoordinates());
+ }
+
+ public static void assertEquals(Polygon p1, Polygon p2) {
+ Assert.assertEquals(p1.getNumInteriorRing(), p2.getNumInteriorRing());
+
+ assertEquals(p1.getExteriorRing(), p2.getExteriorRing());
+
+ // TODO: This test does not check all permutations of the line strings, so it
+ // fails if the holes of the polygons are not ordered the same way
+ for (int i = 0; i < p1.getNumInteriorRing(); i++) {
+ assertEquals(p1.getInteriorRingN(i), p2.getInteriorRingN(i));
+ }
+ }
+
+ public static void assertEquals(MultiPolygon p1, MultiPolygon p2) {
+ Assert.assertEquals(p1.getNumGeometries(), p2.getNumGeometries());
+
+ // TODO: This test does not check all permutations, so it fails
+ // if the inner polygons are not ordered the same way in both MultiPolygons
+ for (int i = 0; i < p1.getNumGeometries(); i++) {
+ Geometry a = p1.getGeometryN(i);
+ Geometry b = p2.getGeometryN(i);
+ assertEquals(a, b);
+ }
+ }
+
+ public static void assertEquals(Geometry s1, Geometry s2) {
+ if(s1 instanceof LineString && s2 instanceof LineString) {
+ assertEquals((LineString) s1, (LineString) s2);
+
+ } else if (s1 instanceof Polygon && s2 instanceof Polygon) {
+ assertEquals((Polygon) s1, (Polygon) s2);
+
+ } else if (s1 instanceof MultiPoint && s2 instanceof MultiPoint) {
+ Assert.assertEquals(s1, s2);
+
+ } else if (s1 instanceof MultiPolygon && s2 instanceof MultiPolygon) {
+ assertEquals((MultiPolygon) s1, (MultiPolygon) s2);
+
+ } else {
+ throw new RuntimeException("equality of shape types not supported [" + s1.getClass().getName() + " and " + s2.getClass().getName() + "]");
+ }
+ }
+
+ public static void assertEquals(JtsGeometry g1, JtsGeometry g2) {
+ assertEquals(g1.getGeom(), g2.getGeom());
+ }
+
+ public static void assertEquals(Shape s1, Shape s2) {
+ if(s1 instanceof JtsGeometry && s2 instanceof JtsGeometry) {
+ assertEquals((JtsGeometry) s1, (JtsGeometry) s2);
+ } else if(s1 instanceof JtsPoint && s2 instanceof JtsPoint) {
+ JtsPoint p1 = (JtsPoint) s1;
+ JtsPoint p2 = (JtsPoint) s2;
+ Assert.assertEquals(p1, p2);
+ } else {
+ throw new RuntimeException("equality of shape types not supported [" + s1.getClass().getName() + " and " + s2.getClass().getName() + "]");
+ }
+ }
+
+ private static Geometry unwrap(Shape shape) {
+ assertThat(shape, instanceOf(JtsGeometry.class));
+ return ((JtsGeometry)shape).getGeom();
+ }
+
+ public static void assertMultiPolygon(Shape shape) {
+ assert(unwrap(shape) instanceof MultiPolygon): "expected MultiPolygon but found " + unwrap(shape).getClass().getName();
+ }
+
+ public static void assertPolygon(Shape shape) {
+ assert(unwrap(shape) instanceof Polygon): "expected Polygon but found " + unwrap(shape).getClass().getName();
+ }
+
+ public static void assertLineString(Shape shape) {
+ assert(unwrap(shape) instanceof LineString): "expected LineString but found " + unwrap(shape).getClass().getName();
+ }
+
+ public static void assertMultiLineString(Shape shape) {
+ assert(unwrap(shape) instanceof MultiLineString): "expected MultiLineString but found " + unwrap(shape).getClass().getName();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
new file mode 100644
index 0000000..f49cc3b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import org.elasticsearch.search.SearchHit;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+
+public class ElasticsearchMatchers {
+
+ public static class SearchHitHasIdMatcher extends TypeSafeMatcher<SearchHit> {
+ private String id;
+
+ public SearchHitHasIdMatcher(String id) {
+ this.id = id;
+ }
+
+ @Override
+ protected boolean matchesSafely(SearchHit searchHit) {
+ return searchHit.getId().equals(id);
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getId());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit id should be ").appendValue(id);
+ }
+ }
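+
+ /* Usage sketch (the response and id are illustrative):
+ *
+ *   assertThat(searchResponse.getHits().getAt(0),
+ *           new ElasticsearchMatchers.SearchHitHasIdMatcher("1"));
+ */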
+
+ public static class SearchHitHasTypeMatcher extends TypeSafeMatcher<SearchHit> {
+ private String type;
+
+ public SearchHitHasTypeMatcher(String type) {
+ this.type = type;
+ }
+
+ @Override
+ public boolean matchesSafely(final SearchHit searchHit) {
+ return searchHit.getType().equals(type);
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getType());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit type should be ").appendValue(type);
+ }
+ }
+
+ public static class SearchHitHasIndexMatcher extends TypeSafeMatcher<SearchHit> {
+ private String index;
+
+ public SearchHitHasIndexMatcher(String index) {
+ this.index = index;
+ }
+
+ @Override
+ public boolean matchesSafely(final SearchHit searchHit) {
+ return searchHit.getIndex().equals(index);
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getIndex());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit index should be ").appendValue(index);
+ }
+ }
+
+ public static class SearchHitHasScoreMatcher extends TypeSafeMatcher<SearchHit> {
+ private float score;
+
+ public SearchHitHasScoreMatcher(float score) {
+ this.score = score;
+ }
+
+ @Override
+ protected boolean matchesSafely(SearchHit searchHit) {
+ return searchHit.getScore() == score;
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getScore());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit score should be ").appendValue(score);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/hamcrest/RegexMatcher.java b/src/test/java/org/elasticsearch/test/hamcrest/RegexMatcher.java
new file mode 100644
index 0000000..62c35e5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/hamcrest/RegexMatcher.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.hamcrest;
+
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+
+import java.util.regex.Pattern;
+
+/**
+ * Matcher that matches strings against a regular expression and allows providing optional pattern flags
+ */
+public class RegexMatcher extends TypeSafeMatcher<String> {
+
+ private final String regex;
+ private final Pattern pattern;
+
+ public RegexMatcher(String regex) {
+ this.regex = regex;
+ this.pattern = Pattern.compile(regex);
+ }
+
+ public RegexMatcher(String regex, int flag) {
+ this.regex = regex;
+ this.pattern = Pattern.compile(regex, flag);
+ }
+
+ @Override
+ protected boolean matchesSafely(String item) {
+ return pattern.matcher(item).find();
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText(regex);
+ }
+
+ public static RegexMatcher matches(String regex) {
+ return new RegexMatcher(regex);
+ }
+
+ public static RegexMatcher matches(String regex, int flag) {
+ return new RegexMatcher(regex, flag);
+ }
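+
+ /* Usage sketch: matchesSafely uses find(), so the pattern only has to occur
+ * somewhere in the examined string (the matched value is illustrative):
+ *
+ *   assertThat("shard [0] failed", RegexMatcher.matches("shard \\[\\d+\\]"));
+ */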
+}
diff --git a/src/test/java/org/elasticsearch/test/junit/annotations/Network.java b/src/test/java/org/elasticsearch/test/junit/annotations/Network.java
new file mode 100644
index 0000000..d2615ea
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/junit/annotations/Network.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.annotations;
+
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+
+import java.lang.annotation.*;
+
+/**
+ * Annotation used to mark tests that require internet connectivity to run.
+ * By default, tests annotated with @Network won't be executed.
+ * Set -Dtests.network=true when running the tests to enable them.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Inherited
+@TestGroup(enabled = false, sysProperty = "tests.network")
+public @interface Network {
+}
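+
+/* Usage sketch (the test method is illustrative):
+ *
+ *   @Network
+ *   public void testAgainstExternalService() throws Exception {
+ *       // only executed when -Dtests.network=true is set
+ *   }
+ */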
diff --git a/src/test/java/org/elasticsearch/test/junit/annotations/TestLogging.java b/src/test/java/org/elasticsearch/test/junit/annotations/TestLogging.java
new file mode 100644
index 0000000..e09cc75
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/junit/annotations/TestLogging.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.annotations;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.METHOD;
+import static java.lang.annotation.ElementType.PACKAGE;
+import static java.lang.annotation.ElementType.TYPE;
+
+/**
+ * Annotation used to set a custom log level for a specific test method.
+ *
+ * It supports multiple comma-separated logger:level key-value pairs.
+ * Use the _root keyword to set the root logger level,
+ * e.g. @TestLogging("_root:DEBUG,org.elasticsearch.cluster.metadata:TRACE")
+ * or just @TestLogging("_root:DEBUG,cluster.metadata:TRACE") since we start the tests with -Des.logger.prefix=
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({PACKAGE, TYPE, METHOD})
+public @interface TestLogging {
+ String value();
+}
diff --git a/src/test/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/src/test/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
new file mode 100644
index 0000000..77793ee
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.listeners;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.runner.Description;
+import org.junit.runner.Result;
+import org.junit.runner.notification.RunListener;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A {@link RunListener} that allows changing the log level for a specific test method.
+ * When a test method is annotated with the {@link org.elasticsearch.test.junit.annotations.TestLogging} annotation, the levels of the specified loggers
+ * are saved internally before the test method executes and overridden with the specified ones.
+ * At the end of the test method execution the original logger levels are restored.
+ *
+ * Note: This class is not thread-safe. Given the static nature of the logging API, it assumes that tests
+ * are never run concurrently in the same JVM. For the very same reason no synchronization has been implemented
+ * around the save/restore of the original logger levels.
+ */
+public class LoggingListener extends RunListener {
+
+ private Map<String, String> previousLoggingMap;
+ private Map<String, String> previousClassLoggingMap;
+ private Map<String, String> previousPackageLoggingMap;
+
+ @Override
+ public void testRunStarted(Description description) throws Exception {
+ previousPackageLoggingMap = processTestLogging(description.getTestClass().getPackage().getAnnotation(TestLogging.class));
+ previousClassLoggingMap = processTestLogging(description.getAnnotation(TestLogging.class));
+ }
+
+ @Override
+ public void testRunFinished(Result result) throws Exception {
+ previousClassLoggingMap = reset(previousClassLoggingMap);
+ previousPackageLoggingMap = reset(previousPackageLoggingMap);
+ }
+
+ @Override
+ public void testStarted(Description description) throws Exception {
+ final TestLogging testLogging = description.getAnnotation(TestLogging.class);
+ previousLoggingMap = processTestLogging(testLogging);
+ }
+
+ @Override
+ public void testFinished(Description description) throws Exception {
+ previousLoggingMap = reset(previousLoggingMap);
+ }
+
+ private static ESLogger resolveLogger(String loggerName) {
+ if (loggerName.equalsIgnoreCase("_root")) {
+ return ESLoggerFactory.getRootLogger();
+ }
+ return Loggers.getLogger(loggerName);
+ }
+
+ private Map<String, String> processTestLogging(TestLogging testLogging) {
+ if (testLogging == null) {
+ return null;
+ }
+ Map<String, String> map = new HashMap<String, String>();
+ final String[] loggersAndLevels = testLogging.value().split(",");
+ for (String loggerAndLevel : loggersAndLevels) {
+ String[] loggerAndLevelArray = loggerAndLevel.split(":");
+ if (loggerAndLevelArray.length >= 2) {
+ String loggerName = loggerAndLevelArray[0];
+ String level = loggerAndLevelArray[1];
+ ESLogger esLogger = resolveLogger(loggerName);
+ map.put(loggerName, esLogger.getLevel());
+ esLogger.setLevel(level);
+ }
+ }
+ return map;
+ }
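+
+ /* For example, @TestLogging("_root:DEBUG,cluster.metadata:TRACE") switches the root
+ * logger to DEBUG and the cluster.metadata logger to TRACE, while the returned map
+ * holds their previous levels so that reset(...) can restore them afterwards. */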
+
+ private Map<String, String> reset(Map<String, String> map) {
+ if (map != null) {
+ for (Map.Entry<String, String> previousLogger : map.entrySet()) {
+ ESLogger esLogger = resolveLogger(previousLogger.getKey());
+ esLogger.setLevel(previousLogger.getValue());
+ }
+ }
+ return null;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
new file mode 100644
index 0000000..5a1e47b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.listeners;
+
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder;
+import com.carrotsearch.randomizedtesting.TraceFormatting;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.TestCluster;
+import org.junit.internal.AssumptionViolatedException;
+import org.junit.runner.Description;
+import org.junit.runner.notification.Failure;
+import org.junit.runner.notification.RunListener;
+
+import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_ITERATIONS;
+
+/**
+ * A {@link RunListener} that emits to {@link System#err} a string with the command
+ * line parameters allowing a quick test re-run from the Maven command line.
+ */
+public class ReproduceInfoPrinter extends RunListener {
+
+ protected final ESLogger logger = Loggers.getLogger(ElasticsearchTestCase.class);
+
+ @Override
+ public void testStarted(Description description) throws Exception {
+ logger.info("Test {} started", description.getDisplayName());
+ }
+
+ @Override
+ public void testFinished(Description description) throws Exception {
+ logger.info("Test {} finished", description.getDisplayName());
+ }
+
+ @Override
+ public void testFailure(Failure failure) throws Exception {
+ // Ignore assumptions.
+ if (failure.getException() instanceof AssumptionViolatedException) {
+ return;
+ }
+
+ final Description d = failure.getDescription();
+ final StringBuilder b = new StringBuilder();
+ b.append("FAILURE : ").append(d.getDisplayName()).append("\n");
+ b.append("REPRODUCE WITH : mvn test");
+ reproduceErrorMessageBuilder(b).appendAllOpts(failure.getDescription());
+
+ b.append("\n");
+ b.append("Throwable:\n");
+ if (failure.getException() != null) {
+ traces().formatThrowable(b, failure.getException());
+ }
+
+ logger.error(b.toString());
+ }
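+
+ /* The emitted message looks roughly like (seed, class and appended options are
+ * illustrative; they come from appendAllOpts and appendESProperties below):
+ *
+ *   REPRODUCE WITH : mvn test -Dtests.seed=1A2B3C4D5E6F -Dtests.class=org.elasticsearch.SomeTests -Des.logger.level=INFO
+ */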
+
+ protected ReproduceErrorMessageBuilder reproduceErrorMessageBuilder(StringBuilder b) {
+ return new MavenMessageBuilder(b);
+ }
+
+ protected TraceFormatting traces() {
+ TraceFormatting traces = new TraceFormatting();
+ try {
+ traces = RandomizedContext.current().getRunner().getTraceFormatting();
+ } catch (IllegalStateException e) {
+ // Ignore if no context.
+ }
+ return traces;
+ }
+
+ protected static class MavenMessageBuilder extends ReproduceErrorMessageBuilder {
+
+ public MavenMessageBuilder(StringBuilder b) {
+ super(b);
+ }
+
+ @Override
+ public ReproduceErrorMessageBuilder appendAllOpts(Description description) {
+ super.appendAllOpts(description);
+ return appendESProperties();
+ }
+
+ /**
+ * Append a single VM option.
+ */
+ @Override
+ public ReproduceErrorMessageBuilder appendOpt(String sysPropName, String value) {
+ if (sysPropName.equals(SYSPROP_ITERATIONS())) { // we don't want the iters to be in there!
+ return this;
+ }
+ if (Strings.hasLength(value)) {
+ return super.appendOpt(sysPropName, value);
+ }
+ return this;
+ }
+
+ public ReproduceErrorMessageBuilder appendESProperties() {
+ appendProperties("es.logger.level", "es.node.mode", "es.node.local", TestCluster.TESTS_ENABLE_MOCK_MODULES,
+ "tests.assertion.disabled", "tests.security.manager", "tests.nighly", "tests.jvms", "tests.client.ratio");
+ if (System.getProperty("tests.jvm.argline") != null && !System.getProperty("tests.jvm.argline").isEmpty()) {
+ appendOpt("tests.jvm.argline", "\"" + System.getProperty("tests.jvm.argline") + "\"");
+ }
+ return this;
+ }
+
+ protected ReproduceErrorMessageBuilder appendProperties(String... properties) {
+ for (String sysPropName : properties) {
+ if (Strings.hasLength(System.getProperty(sysPropName))) {
+ appendOpt(sysPropName, System.getProperty(sysPropName));
+ }
+ }
+ return this;
+ }
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTests.java b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTests.java
new file mode 100644
index 0000000..26841aa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTests.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest;
+
+import org.elasticsearch.test.rest.junit.RestTestSuiteRunner;
+import org.junit.runner.RunWith;
+
+import static org.apache.lucene.util.LuceneTestCase.Slow;
+
+/**
+ * Runs the client test suite against an elasticsearch node, which can be an external node or an automatically created cluster.
+ * Communicates with elasticsearch exclusively via the REST layer.
+ *
+ * @see RestTestSuiteRunner for extensive documentation and all the supported options
+ */
+@Slow
+@RunWith(RestTestSuiteRunner.class)
+public class ElasticsearchRestTests {
+
+
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/test/rest/RestTestExecutionContext.java b/src/test/java/org/elasticsearch/test/rest/RestTestExecutionContext.java
new file mode 100644
index 0000000..03b19c1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/RestTestExecutionContext.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.rest.client.RestClient;
+import org.elasticsearch.test.rest.client.RestException;
+import org.elasticsearch.test.rest.client.RestResponse;
+import org.elasticsearch.test.rest.spec.RestSpec;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Execution context passed across the REST tests.
+ * Holds the REST client used to communicate with elasticsearch.
+ * Caches the last obtained test response and allows stashing part of it in variables
+ * that can be used as input values in subsequent requests.
+ */
+public class RestTestExecutionContext implements Closeable {
+
+ private static final ESLogger logger = Loggers.getLogger(RestTestExecutionContext.class);
+
+ private final RestClient restClient;
+
+ private final String esVersion;
+
+ private final Stash stash = new Stash();
+
+ private RestResponse response;
+
+ public RestTestExecutionContext(InetSocketAddress[] addresses, RestSpec restSpec) throws RestException, IOException {
+ this.restClient = new RestClient(addresses, restSpec);
+ this.esVersion = restClient.getEsVersion();
+ }
+
+ /**
+ * Calls an elasticsearch API with the parameters and request body provided as arguments.
+ * Saves the obtained response in the execution context.
+ * @throws RestException if the returned status code is not OK
+ */
+ public RestResponse callApi(String apiName, Map<String, String> params, List<Map<String, Object>> bodies) throws IOException, RestException {
+ //makes a copy of the parameters before modifying them for this specific request
+ HashMap<String, String> requestParams = Maps.newHashMap(params);
+ for (Map.Entry<String, String> entry : requestParams.entrySet()) {
+ if (stash.isStashedValue(entry.getValue())) {
+ entry.setValue(stash.unstashValue(entry.getValue()).toString());
+ }
+ }
+
+ String body = actualBody(bodies);
+
+ try {
+ response = callApiInternal(apiName, requestParams, body);
+ //we always stash the last response body
+ stash.stashValue("body", response.getBody());
+ return response;
+ } catch(RestException e) {
+ response = e.restResponse();
+ throw e;
+ }
+ }
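+
+ /* Usage sketch (the api name, parameter and empty body list are illustrative):
+ *
+ *   Map<String, String> params = new HashMap<String, String>();
+ *   params.put("index", "test");
+ *   context.callApi("indices.create", params, Collections.<Map<String, Object>>emptyList());
+ */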
+
+ private String actualBody(List<Map<String, Object>> bodies) throws IOException {
+ if (bodies.isEmpty()) {
+ return "";
+ }
+
+ if (bodies.size() == 1) {
+ return bodyAsString(stash.unstashMap(bodies.get(0)));
+ }
+
+ StringBuilder bodyBuilder = new StringBuilder();
+ for (Map<String, Object> body : bodies) {
+ bodyBuilder.append(bodyAsString(stash.unstashMap(body))).append("\n");
+ }
+ return bodyBuilder.toString();
+ }
+
+ private String bodyAsString(Map<String, Object> body) throws IOException {
+ return XContentFactory.jsonBuilder().map(body).string();
+ }
+
+ /**
+ * Calls an elasticsearch API internally, without saving the obtained response in the context.
+ * Useful for internal calls (e.g. deleting an index during teardown)
+ * @throws RestException if the returned status code is not OK
+ */
+ public RestResponse callApiInternal(String apiName, String... params) throws IOException, RestException {
+ return restClient.callApi(apiName, params);
+ }
+
+ private RestResponse callApiInternal(String apiName, Map<String, String> params, String body) throws IOException, RestException {
+ return restClient.callApi(apiName, params, body);
+ }
+
+ /**
+ * Extracts a specific value from the last saved response
+ */
+ public Object response(String path) throws IOException {
+ return response.evaluate(path);
+ }
+
+ /**
+ * Clears the last obtained response and the stashed fields
+ */
+ public void clear() {
+ logger.debug("resetting response and stash");
+ response = null;
+ stash.clear();
+ }
+
+ public Stash stash() {
+ return stash;
+ }
+
+ /**
+ * Returns the current es version as a string
+ */
+ public String esVersion() {
+ return esVersion;
+ }
+
+ /**
+ * Closes the execution context and releases the underlying resources
+ */
+ public void close() {
+ this.restClient.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/Stash.java b/src/test/java/org/elasticsearch/test/rest/Stash.java
new file mode 100644
index 0000000..3179ecc
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/Stash.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Allows caching the last obtained test response, or part of it, in variables
+ * that can be used as input values in subsequent requests and assertions.
+ */
+public class Stash {
+
+ private static final ESLogger logger = Loggers.getLogger(Stash.class);
+
+ private final Map<String, Object> stash = Maps.newHashMap();
+
+ /**
+ * Saves a specific field in the stash as a key-value pair
+ */
+ public void stashValue(String key, Object value) {
+ logger.debug("stashing [{}]=[{}]", key, value);
+ Object old = stash.put(key, value);
+ if (old != null && old != value) {
+ logger.trace("replaced stashed value [{}] with same key [{}]", old, key);
+ }
+ }
+
+ /**
+ * Clears the previously stashed values
+ */
+ public void clear() {
+ stash.clear();
+ }
+
+ /**
+ * Tells whether a particular value needs to be looked up in the stash.
+ * The stash contains fields previously extracted from responses that can be reused
+ * as arguments in subsequent requests (e.g. scroll_id)
+ */
+ public boolean isStashedValue(Object key) {
+ if (key == null) {
+ return false;
+ }
+ String stashKey = key.toString();
+ return Strings.hasLength(stashKey) && stashKey.startsWith("$");
+ }
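+
+ /* For example, after stashValue("scroll_id", <some id>), a later request that uses
+ * "$scroll_id" as a value is recognized here and resolved by unstashValue below. */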
+
+ /**
+ * Extracts a value from the current stash.
+ * The stash contains fields previously extracted from responses that can be reused
+ * as arguments in subsequent requests (e.g. scroll_id)
+ */
+ public Object unstashValue(String value) {
+ Object stashedValue = stash.get(value.substring(1));
+ if (stashedValue == null) {
+ throw new IllegalArgumentException("stashed value not found for key [" + value + "]");
+ }
+ return stashedValue;
+ }
+
+ /**
+ * Recursively unstashes map values if needed
+ */
+ public Map<String, Object> unstashMap(Map<String, Object> map) {
+ Map<String, Object> copy = Maps.newHashMap(map);
+ unstashObject(copy);
+ return copy;
+ }
+
+ @SuppressWarnings("unchecked")
+ private void unstashObject(Object obj) {
+ if (obj instanceof List) {
+ List list = (List)obj;
+ for (int i = 0; i < list.size(); i++) {
+ Object o = list.get(i);
+ if (isStashedValue(o)) {
+ list.set(i, unstashValue(o.toString()));
+ } else {
+ unstashObject(o);
+ }
+ }
+ }
+ if (obj instanceof Map) {
+ Map<String, Object> map = (Map) obj;
+ for (Map.Entry<String, Object> entry : map.entrySet()) {
+ if (isStashedValue(entry.getValue())) {
+ entry.setValue(unstashValue(entry.getValue().toString()));
+ } else {
+ unstashObject(entry.getValue());
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/client/RestClient.java b/src/test/java/org/elasticsearch/test/rest/client/RestClient.java
new file mode 100644
index 0000000..d038b14
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/RestClient.java
@@ -0,0 +1,243 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client;
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.elasticsearch.test.rest.spec.RestApi;
+import org.elasticsearch.test.rest.spec.RestSpec;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * REST client used to test the elasticsearch REST layer.
+ * Holds the {@link RestSpec} used to translate API calls into REST calls.
+ */
+public class RestClient implements Closeable {
+
+ private static final ESLogger logger = Loggers.getLogger(RestClient.class);
+
+ private final RestSpec restSpec;
+ private final CloseableHttpClient httpClient;
+
+ private final InetSocketAddress[] addresses;
+
+ private final String esVersion;
+
+ public RestClient(InetSocketAddress[] addresses, RestSpec restSpec) throws IOException, RestException {
+ assert addresses.length > 0;
+ this.restSpec = restSpec;
+ this.httpClient = createHttpClient();
+ this.addresses = addresses;
+ this.esVersion = readAndCheckVersion();
+ logger.info("REST client initialized {}, elasticsearch version: [{}]", addresses, esVersion);
+ }
+
+ private String readAndCheckVersion() throws IOException, RestException {
+ //we make a manual call here without using the callApi method, mainly because we are initializing
+ //and the randomized context doesn't exist for the current thread (it would otherwise be used to choose the method)
+ RestApi restApi = restApi("info");
+ assert restApi.getPaths().size() == 1;
+ assert restApi.getMethods().size() == 1;
+
+ String version = null;
+ for (InetSocketAddress address : addresses) {
+ RestResponse restResponse = new RestResponse(new HttpRequestBuilder(httpClient)
+ .host(address.getHostName()).port(address.getPort())
+ .path(restApi.getPaths().get(0))
+ .method(restApi.getMethods().get(0)).execute());
+ checkStatusCode(restResponse);
+
+ Object latestVersion = restResponse.evaluate("version.number");
+ if (latestVersion == null) {
+ throw new RuntimeException("elasticsearch version not found in the response");
+ }
+ if (version == null) {
+ version = latestVersion.toString();
+ } else {
+ if (!latestVersion.equals(version)) {
+ throw new IllegalArgumentException("provided nodes addresses run different elasticsearch versions");
+ }
+ }
+ }
+ return version;
+ }
+
+ public String getEsVersion() {
+ return esVersion;
+ }
+
+ /**
+ * Calls an API with the provided parameters
+ * @throws RestException if the obtained status code is not OK, unless the specific error code needs to be ignored
+ * according to the ignore parameter received as input (which won't get sent to elasticsearch)
+ */
+ public RestResponse callApi(String apiName, String... params) throws IOException, RestException {
+ if (params.length % 2 != 0) {
+ throw new IllegalArgumentException("The number of params passed must be even but was [" + params.length + "]");
+ }
+
+ Map<String, String> paramsMap = Maps.newHashMap();
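+ //the varargs come as alternating key/value pairs: params[i] is the key, params[i + 1] the value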
+ for (int i = 0; i < params.length; i++) {
+ paramsMap.put(params[i++], params[i]);
+ }
+
+ return callApi(apiName, paramsMap, null);
+ }
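+
+ /* Usage sketch: the varargs are alternating key/value pairs (the api name and
+ * values are illustrative):
+ *
+ *   restClient.callApi("indices.delete", "index", "test");
+ */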
+
+ /**
+ * Calls an API with the provided parameters and body
+ * @throws RestException if the obtained status code is not OK, unless the specific error code needs to be ignored
+ * according to the ignore parameter received as input (which won't get sent to elasticsearch)
+ */
+ public RestResponse callApi(String apiName, Map<String, String> params, String body) throws IOException, RestException {
+
+ List<Integer> ignores = Lists.newArrayList();
+ Map<String, String> requestParams = null;
+ if (params != null) {
+ //makes a copy of the parameters before modifying them for this specific request
+ requestParams = Maps.newHashMap(params);
+ //ignore is a special parameter supported by the clients, shouldn't be sent to es
+ String ignoreString = requestParams.remove("ignore");
+ if (Strings.hasLength(ignoreString)) {
+ try {
+ ignores.add(Integer.valueOf(ignoreString));
+ } catch(NumberFormatException e) {
+ throw new IllegalArgumentException("ignore value should be a number, found [" + ignoreString + "] instead");
+ }
+ }
+ }
+
+ HttpRequestBuilder httpRequestBuilder = callApiBuilder(apiName, requestParams, body);
+ logger.debug("calling api [{}]", apiName);
+ HttpResponse httpResponse = httpRequestBuilder.execute();
+
+ //http HEAD doesn't support a response body
+ //for the few APIs (the exists class of APIs) that use it we need to accept 404 too
+ if (!httpResponse.supportsBody()) {
+ ignores.add(404);
+ }
+
+ RestResponse restResponse = new RestResponse(httpResponse);
+ checkStatusCode(restResponse, ignores);
+ return restResponse;
+ }
+
+ private void checkStatusCode(RestResponse restResponse, List<Integer> ignores) throws RestException {
+ //ignore is a catch within the client, to prevent the client from throwing an error if it gets non OK codes back
+ if (ignores.contains(restResponse.getStatusCode())) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("ignored non ok status codes {} as requested", ignores);
+ }
+ return;
+ }
+ checkStatusCode(restResponse);
+ }
+
+ private void checkStatusCode(RestResponse restResponse) throws RestException {
+ if (restResponse.isError()) {
+ throw new RestException("non ok status code [" + restResponse.getStatusCode() + "] returned", restResponse);
+ }
+ }
+
+ private HttpRequestBuilder callApiBuilder(String apiName, Map<String, String> params, String body) {
+
+ //create doesn't exist in the spec but is supported in the clients (index with op_type=create)
+ boolean indexCreateApi = "create".equals(apiName);
+ String api = indexCreateApi ? "index" : apiName;
+ RestApi restApi = restApi(api);
+
+ HttpRequestBuilder httpRequestBuilder = httpRequestBuilder();
+
+ if (Strings.hasLength(body)) {
+ if (!restApi.isBodySupported()) {
+ throw new IllegalArgumentException("body is not supported by [" + restApi.getName() + "] api");
+ }
+ httpRequestBuilder.body(body);
+ } else {
+ if (restApi.isBodyRequired()) {
+ throw new IllegalArgumentException("body is required by [" + restApi.getName() + "] api");
+ }
+ }
+
+ //divide params between the ones that go in the query string and the ones that go in the path
+ Map<String, String> pathParts = Maps.newHashMap();
+ if (params != null) {
+ for (Map.Entry<String, String> entry : params.entrySet()) {
+ if (restApi.getPathParts().contains(entry.getKey())) {
+ pathParts.put(entry.getKey(), entry.getValue());
+ } else {
+ if (!restApi.getParams().contains(entry.getKey())) {
+ throw new IllegalArgumentException("param [" + entry.getKey() + "] not supported in [" + restApi.getName() + "] api");
+ }
+ httpRequestBuilder.addParam(entry.getKey(), entry.getValue());
+ }
+ }
+ }
+
+ if (indexCreateApi) {
+ httpRequestBuilder.addParam("op_type", "create");
+ }
+
+ //the http method is randomized (out of the available ones with the chosen api)
+ return httpRequestBuilder.method(RandomizedTest.randomFrom(restApi.getSupportedMethods(pathParts.keySet())))
+ .path(RandomizedTest.randomFrom(restApi.getFinalPaths(pathParts)));
+ }
+
+ private RestApi restApi(String apiName) {
+ RestApi restApi = restSpec.getApi(apiName);
+ if (restApi == null) {
+ throw new IllegalArgumentException("rest api [" + apiName + "] doesn't exist in the rest spec");
+ }
+ return restApi;
+ }
+
+ protected HttpRequestBuilder httpRequestBuilder() {
+ //the address used is randomized between the available ones
+ InetSocketAddress address = RandomizedTest.randomFrom(addresses);
+ return new HttpRequestBuilder(httpClient).host(address.getHostName()).port(address.getPort());
+ }
+
+ protected CloseableHttpClient createHttpClient() {
+ return HttpClients.createDefault();
+ }
+
+ /**
+ * Closes the REST client and the underlying http client
+ */
+ public void close() {
+ try {
+ httpClient.close();
+ } catch(IOException e) {
+ logger.error(e.getMessage(), e);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/client/RestException.java b/src/test/java/org/elasticsearch/test/rest/client/RestException.java
new file mode 100644
index 0000000..2236134
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/RestException.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client;
+
+/**
+ * Thrown when a status code that holds an error is received (unless it needs to be ignored).
+ * Holds the original {@link RestResponse}.
+ */
+public class RestException extends Exception {
+
+ private final RestResponse restResponse;
+
+ public RestException(String message, RestResponse restResponse) {
+ super(message);
+ this.restResponse = restResponse;
+ }
+
+ public RestResponse restResponse() {
+ return restResponse;
+ }
+
+ public int statusCode() {
+ return restResponse.getStatusCode();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/client/RestResponse.java b/src/test/java/org/elasticsearch/test/rest/client/RestResponse.java
new file mode 100644
index 0000000..fd6df25
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/RestResponse.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client;
+
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.elasticsearch.test.rest.json.JsonPath;
+
+import java.io.IOException;
+
+/**
+ * Response obtained from a REST call.
+ * Supports parsing the response body as JSON when needed and returning specific values extracted from it.
+ */
+public class RestResponse {
+
+ private final HttpResponse response;
+ private JsonPath parsedResponse;
+
+ RestResponse(HttpResponse response) {
+ this.response = response;
+ }
+
+ public int getStatusCode() {
+ return response.getStatusCode();
+ }
+
+ public String getReasonPhrase() {
+ return response.getReasonPhrase();
+ }
+
+ public String getBody() {
+ return response.getBody();
+ }
+
+ public boolean isError() {
+ return response.isError();
+ }
+
+ /**
+ * Parses the response body as json and extracts a specific value from it (identified by the provided path)
+ */
+ public Object evaluate(String path) throws IOException {
+
+ if (response == null) {
+ return null;
+ }
+
+ JsonPath jsonPath = parsedResponse();
+
+ if (jsonPath == null) {
+ //special case: APIs that don't support a body (e.g. exists) return true on 200 and false on 404, even with no body
+ //is_true: '' means the response had no body but the client returned true (caused by 200)
+ //is_false: '' means the response had no body but the client returned false (caused by 404)
+ if ("".equals(path) && !response.supportsBody()) {
+ return !response.isError();
+ }
+ return null;
+ }
+
+ return jsonPath.evaluate(path);
+ }
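+
+ /* For example, evaluate("version.number") extracts the elasticsearch version from
+ * the body of an info call, as done in RestClient#readAndCheckVersion. */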
+
+ private JsonPath parsedResponse() throws IOException {
+ if (parsedResponse != null) {
+ return parsedResponse;
+ }
+ if (response == null || !response.hasBody()) {
+ return null;
+ }
+ return parsedResponse = new JsonPath(response.getBody());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java b/src/test/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
new file mode 100644
index 0000000..480fc7b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+
+import java.net.URI;
+
+/**
+ * Allows sending DELETE requests with a body (not supported out of the box)
+ */
+public class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase {
+
+ public final static String METHOD_NAME = "DELETE";
+
+ public HttpDeleteWithEntity(final URI uri) {
+ setURI(uri);
+ }
+
+ @Override
+ public String getMethod() {
+ return METHOD_NAME;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java b/src/test/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java
new file mode 100644
index 0000000..aa0129f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+
+import java.net.URI;
+
+/**
+ * Allows sending GET requests with a body (not supported out of the box)
+ */
+public class HttpGetWithEntity extends HttpEntityEnclosingRequestBase {
+
+ public final static String METHOD_NAME = "GET";
+
+ public HttpGetWithEntity(final URI uri) {
+ setURI(uri);
+ }
+
+ @Override
+ public String getMethod() {
+ return METHOD_NAME;
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java b/src/test/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
new file mode 100644
index 0000000..a6487a6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Maps;
+import org.apache.http.client.methods.*;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.Charset;
+import java.util.Map;
+
+/**
+ * Executable builder for an HTTP request.
+ * Holds an {@link org.apache.http.client.HttpClient} that is used to send the built request.
+ */
+public class HttpRequestBuilder {
+
+ private static final ESLogger logger = Loggers.getLogger(HttpRequestBuilder.class);
+
+ static final Charset DEFAULT_CHARSET = Charset.forName("utf-8");
+
+ private final CloseableHttpClient httpClient;
+
+ private String host;
+
+ private int port;
+
+ private String path = "";
+
+ private final Map<String, String> params = Maps.newHashMap();
+
+ private String method = HttpGetWithEntity.METHOD_NAME;
+
+ private String body;
+
+ public HttpRequestBuilder(CloseableHttpClient httpClient) {
+ this.httpClient = httpClient;
+ }
+
+ public HttpRequestBuilder host(String host) {
+ this.host = host;
+ return this;
+ }
+
+ public HttpRequestBuilder port(int port) {
+ this.port = port;
+ return this;
+ }
+
+ public HttpRequestBuilder path(String path) {
+ this.path = path;
+ return this;
+ }
+
+ public HttpRequestBuilder addParam(String name, String value) {
+ this.params.put(name, value);
+ return this;
+ }
+
+ public HttpRequestBuilder method(String method) {
+ this.method = method;
+ return this;
+ }
+
+ public HttpRequestBuilder body(String body) {
+ if (Strings.hasLength(body)) {
+ this.body = body;
+ }
+ return this;
+ }
+
+ public HttpResponse execute() throws IOException {
+ CloseableHttpResponse closeableHttpResponse = null;
+ try {
+ HttpUriRequest httpUriRequest = buildRequest();
+ if (logger.isTraceEnabled()) {
+ StringBuilder stringBuilder = new StringBuilder(httpUriRequest.getMethod()).append(" ").append(httpUriRequest.getURI());
+ if (Strings.hasLength(body)) {
+ stringBuilder.append("\n").append(body);
+ }
+ logger.trace("sending request \n{}", stringBuilder.toString());
+ }
+ closeableHttpResponse = httpClient.execute(httpUriRequest);
+ HttpResponse httpResponse = new HttpResponse(httpUriRequest, closeableHttpResponse);
+ logger.trace("got response \n{}\n{}", closeableHttpResponse, httpResponse.hasBody() ? httpResponse.getBody() : "");
+ return httpResponse;
+ } finally {
+ try {
+ IOUtils.close(closeableHttpResponse);
+ } catch (IOException e) {
+ logger.error("error closing http response", e);
+ }
+ }
+ }
+
+ private HttpUriRequest buildRequest() {
+
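+ //GET and DELETE go through custom entity-enclosing variants, since the stock HttpGet/HttpDelete don't accept a request body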
+ if (HttpGetWithEntity.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpGetWithEntity(buildUri()));
+ }
+
+ if (HttpHead.METHOD_NAME.equalsIgnoreCase(method)) {
+ checkBodyNotSupported();
+ return new HttpHead(buildUri());
+ }
+
+ if (HttpDeleteWithEntity.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpDeleteWithEntity(buildUri()));
+ }
+
+ if (HttpPut.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpPut(buildUri()));
+ }
+
+ if (HttpPost.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpPost(buildUri()));
+ }
+
+ throw new UnsupportedOperationException("method [" + method + "] not supported");
+ }
+
+ private URI buildUri() {
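+ //the multi-argument URI constructor takes care of encoding illegal characters in path and query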
+ String query;
+ if (params.size() == 0) {
+ query = null;
+ } else {
+ query = Joiner.on('&').withKeyValueSeparator("=").join(params);
+ }
+ try {
+ return new URI("http", null, host, port, path, query, null);
+ } catch (URISyntaxException e) {
+ throw new IllegalArgumentException(e);
+ }
+ }
+
+ private HttpEntityEnclosingRequestBase addOptionalBody(HttpEntityEnclosingRequestBase requestBase) {
+ if (Strings.hasText(body)) {
+ requestBase.setEntity(new StringEntity(body, DEFAULT_CHARSET));
+ }
+ return requestBase;
+ }
+
+ private void checkBodyNotSupported() {
+ if (Strings.hasText(body)) {
+ throw new IllegalArgumentException("request body not supported with head request");
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder stringBuilder = new StringBuilder(method).append(" '")
+ .append(host).append(":").append(port).append(path).append("'");
+ if (!params.isEmpty()) {
+ stringBuilder.append(", params=").append(params);
+ }
+ if (Strings.hasLength(body)) {
+ stringBuilder.append(", body=\n").append(body);
+ }
+ return stringBuilder.toString();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/client/http/HttpResponse.java b/src/test/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
new file mode 100644
index 0000000..84ac372
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.util.EntityUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.io.IOException;
+
+/**
+ * Response obtained from an HTTP request.
+ * Always consumes the whole response body, loading it entirely into a string.
+ */
+public class HttpResponse {
+
+ private static final ESLogger logger = Loggers.getLogger(HttpResponse.class);
+
+ private final HttpUriRequest httpRequest;
+ private final int statusCode;
+ private final String reasonPhrase;
+ private final String body;
+
+ HttpResponse(HttpUriRequest httpRequest, CloseableHttpResponse httpResponse) {
+ this.httpRequest = httpRequest;
+ this.statusCode = httpResponse.getStatusLine().getStatusCode();
+ this.reasonPhrase = httpResponse.getStatusLine().getReasonPhrase();
+ if (httpResponse.getEntity() != null) {
+ try {
+ this.body = EntityUtils.toString(httpResponse.getEntity(), HttpRequestBuilder.DEFAULT_CHARSET);
+ } catch (IOException e) {
+ EntityUtils.consumeQuietly(httpResponse.getEntity());
+ throw new RuntimeException(e);
+ } finally {
+ try {
+ httpResponse.close();
+ } catch (IOException e) {
+ logger.error(e.getMessage(), e);
+ }
+ }
+ } else {
+ this.body = null;
+ }
+ }
+
+ public boolean isError() {
+ return statusCode >= 400;
+ }
+
+ public int getStatusCode() {
+ return statusCode;
+ }
+
+ public String getReasonPhrase() {
+ return reasonPhrase;
+ }
+
+ public String getBody() {
+ return body;
+ }
+
+ public boolean hasBody() {
+ return body != null;
+ }
+
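+ //per the HTTP spec, responses to HEAD requests never carry a body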
+ public boolean supportsBody() {
+ return !HttpHead.METHOD_NAME.equals(httpRequest.getMethod());
+ }
+
+ @Override
+ public String toString() {
+ //note: new StringBuilder(statusCode) would set the initial capacity rather than append the status code
+ StringBuilder stringBuilder = new StringBuilder().append(statusCode).append(" ").append(reasonPhrase);
+ if (hasBody()) {
+ stringBuilder.append("\n").append(body);
+ }
+ return stringBuilder.toString();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/json/JsonPath.java b/src/test/java/org/elasticsearch/test/rest/json/JsonPath.java
new file mode 100644
index 0000000..12e9aa3
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/json/JsonPath.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.json;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Holds a JSON object and allows extracting specific values from it
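+ * <p>
+ * A minimal usage sketch (the json document is illustrative):
+ * <pre>
+ * JsonPath jsonPath = new JsonPath("{\"hits\":{\"total\":2}}");
+ * Object total = jsonPath.evaluate("hits.total"); //returns 2
+ * </pre>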
+ */
+public class JsonPath {
+
+ final String json;
+ final Map<String, Object> jsonMap;
+
+ public JsonPath(String json) throws IOException {
+ this.json = json;
+ this.jsonMap = convertToMap(json);
+ }
+
+ private static Map<String, Object> convertToMap(String json) throws IOException {
+ return JsonXContent.jsonXContent.createParser(json).mapOrderedAndClose();
+ }
+
+ /**
+ * Returns the object corresponding to the provided path if present, null otherwise
+ */
+ public Object evaluate(String path) {
+ String[] parts = parsePath(path);
+ Object object = jsonMap;
+ for (String part : parts) {
+ object = evaluate(part, object);
+ if (object == null) {
+ return null;
+ }
+ }
+ return object;
+ }
+
+ @SuppressWarnings("unchecked")
+ private Object evaluate(String key, Object object) {
+ if (object instanceof Map) {
+ return ((Map<String, Object>) object).get(key);
+ }
+ if (object instanceof List) {
+ List<Object> list = (List<Object>) object;
+ try {
+ return list.get(Integer.valueOf(key));
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException("element was a list, but [" + key + "] was not numeric", e);
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException("element was a list with " + list.size() + " elements, but [" + key + "] was out of bounds", e);
+ }
+ }
+
+ throw new IllegalArgumentException("no object found for [" + key + "] within object of class [" + object.getClass() + "]");
+ }
+
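+ //splits the path on dots; a backslash escapes the following dot, allowing field names that contain dots (e.g. "field\.name")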
+ private String[] parsePath(String path) {
+ List<String> list = Lists.newArrayList();
+ StringBuilder current = new StringBuilder();
+ boolean escape = false;
+ for (int i = 0; i < path.length(); i++) {
+ char c = path.charAt(i);
+ if (c == '\\') {
+ escape = true;
+ continue;
+ }
+
+ if (c == '.') {
+ if (escape) {
+ escape = false;
+ } else {
+ if (current.length() > 0) {
+ list.add(current.toString());
+ current.setLength(0);
+ }
+ continue;
+ }
+ }
+
+ current.append(c);
+ }
+
+ if (current.length() > 0) {
+ list.add(current.toString());
+ }
+
+ return list.toArray(new String[list.size()]);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/junit/DescriptionHelper.java b/src/test/java/org/elasticsearch/test/rest/junit/DescriptionHelper.java
new file mode 100644
index 0000000..44284a4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/junit/DescriptionHelper.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.junit;
+
+import com.google.common.base.Joiner;
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.TestSection;
+import org.junit.runner.Description;
+
+import java.util.Map;
+
+/**
+ * Helper that knows how to assign proper junit {@link Description}s to each of the nodes in the tests tree
+ */
+public final class DescriptionHelper {
+
+ private DescriptionHelper() {
+
+ }
+
+ /*
+ The following generated ids need to be unique throughout a tests run.
+ Ids are also shown by IDEs (as of junit 4.11 the unique ids can differ from what gets shown, not yet in 4.10).
+ Some tricks are applied to control what gets shown in IDEs, so that the ids stay unique and readable at the same time.
+ */
+
+ static Description createRootDescription(String name) {
+ return Description.createSuiteDescription(name);
+ }
+
+ static Description createApiDescription(String api) {
+ return Description.createSuiteDescription(api);
+ }
+
+ static Description createTestSuiteDescription(RestTestSuite restTestSuite) {
+ //e.g. "indices.open (10_basic)", which leads to 10_basic being shown by IDEs
+ String name = restTestSuite.getApi() + " (" + restTestSuite.getName() + ")";
+ return Description.createSuiteDescription(name);
+ }
+
+ static Description createTestSectionWithRepetitionsDescription(RestTestSuite restTestSuite, TestSection testSection) {
+ //e.g. "indices.open/10_basic (Basic test for index open/close)", which leads to
+ //"Basic test for index open/close" being shown by IDEs
+ String name = restTestSuite.getDescription() + " (" + testSection.getName() + ")";
+ return Description.createSuiteDescription(name);
+ }
+
+ static Description createTestSectionIterationDescription(RestTestSuite restTestSuite, TestSection testSection, Map<String, Object> args) {
+ //e.g. "Basic test for index open/close {#0} (indices.open/10_basic)" some IDEs might strip out the part between parentheses
+ String name = testSection.getName() + formatMethodArgs(args) + " (" + restTestSuite.getDescription() + ")";
+ return Description.createSuiteDescription(name);
+ }
+
+ private static String formatMethodArgs(Map<String, Object> args) {
+ if (args == null || args.isEmpty()) return "";
+
+ StringBuilder b = new StringBuilder(" {");
+ Joiner.on(" ").withKeyValueSeparator("").appendTo(b, args);
+ b.append("}");
+
+ return b.toString();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/junit/RestReproduceInfoPrinter.java b/src/test/java/org/elasticsearch/test/rest/junit/RestReproduceInfoPrinter.java
new file mode 100644
index 0000000..735fbf6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/junit/RestReproduceInfoPrinter.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.junit;
+
+import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder;
+import com.carrotsearch.randomizedtesting.StandaloneRandomizedContext;
+import com.carrotsearch.randomizedtesting.TraceFormatting;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
+import org.elasticsearch.test.rest.ElasticsearchRestTests;
+import org.elasticsearch.test.rest.junit.RestTestSuiteRunner.RunMode;
+import org.junit.runner.Description;
+
+import java.util.Arrays;
+
+import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_RANDOM_SEED;
+import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_TESTCLASS;
+import static org.elasticsearch.test.rest.junit.RestTestSuiteRunner.*;
+
+/**
+ * A {@link org.junit.runner.notification.RunListener} that emits to {@link System#err} a string with the command
+ * line parameters that allow a quick REST test re-run from the Maven command line.
+ */
+class RestReproduceInfoPrinter extends ReproduceInfoPrinter {
+
+ protected static final ESLogger logger = Loggers.getLogger(RestReproduceInfoPrinter.class);
+
+ private static boolean isTestCluster() {
+ return runMode() == RunMode.TEST_CLUSTER;
+ }
+
+ @Override
+ protected TraceFormatting traces() {
+ return new TraceFormatting(
+ Arrays.asList(
+ "org.junit.",
+ "junit.framework.",
+ "sun.",
+ "java.lang.reflect.",
+ "com.carrotsearch.randomizedtesting.",
+ "org.elasticsearch.test.rest.junit."
+ ));
+ }
+
+ @Override
+ protected ReproduceErrorMessageBuilder reproduceErrorMessageBuilder(StringBuilder b) {
+ return new MavenMessageBuilder(b);
+ }
+
+ private static class MavenMessageBuilder extends ReproduceInfoPrinter.MavenMessageBuilder {
+
+ public MavenMessageBuilder(StringBuilder b) {
+ super(b);
+ }
+
+ @Override
+ public ReproduceErrorMessageBuilder appendAllOpts(Description description) {
+
+ try {
+ appendOpt(SYSPROP_RANDOM_SEED(), StandaloneRandomizedContext.getSeedAsString());
+ } catch (IllegalStateException e) {
+ logger.warn("No context available when dumping reproduce options?");
+ }
+
+ //we know that ElasticsearchRestTests is the only one that runs with RestTestSuiteRunner
+ appendOpt(SYSPROP_TESTCLASS(), ElasticsearchRestTests.class.getName());
+
+ if (description.getClassName() != null) {
+ appendOpt(REST_TESTS_SUITE, description.getClassName());
+ }
+
+ appendRunnerProperties();
+ appendEnvironmentSettings();
+
+ appendProperties("es.logger.level");
+
+ if (isTestCluster()) {
+ appendProperties("es.node.mode", "es.node.local");
+ }
+
+ appendRestTestsProperties();
+
+ return this;
+ }
+
+ public ReproduceErrorMessageBuilder appendRestTestsProperties() {
+ return appendProperties(REST_TESTS_MODE, REST_TESTS_SPEC);
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/junit/RestTestCandidate.java b/src/test/java/org/elasticsearch/test/rest/junit/RestTestCandidate.java
new file mode 100644
index 0000000..2d7544d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/junit/RestTestCandidate.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.junit;
+
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.SetupSection;
+import org.elasticsearch.test.rest.section.TestSection;
+import org.junit.runner.Description;
+
+/**
+ * Wraps {@link org.elasticsearch.test.rest.section.TestSection}s ready to be run,
+ * properly enriched with the needed execution information.
+ * The tests tree structure gets flattened to the leaves (test sections).
+ */
+public class RestTestCandidate {
+
+ private final RestTestSuite restTestSuite;
+ private final Description suiteDescription;
+ private final TestSection testSection;
+ private final Description testDescription;
+ private final long seed;
+
+ static RestTestCandidate empty(RestTestSuite restTestSuite, Description suiteDescription) {
+ return new RestTestCandidate(restTestSuite, suiteDescription, null, null, -1);
+ }
+
+ RestTestCandidate(RestTestSuite restTestSuite, Description suiteDescription,
+ TestSection testSection, Description testDescription, long seed) {
+ this.restTestSuite = restTestSuite;
+ this.suiteDescription = suiteDescription;
+ this.testSection = testSection;
+ this.testDescription = testDescription;
+ this.seed = seed;
+ }
+
+ public String getApi() {
+ return restTestSuite.getApi();
+ }
+
+ public String getName() {
+ return restTestSuite.getName();
+ }
+
+ public String getSuiteDescription() {
+ return restTestSuite.getDescription();
+ }
+
+ public Description describeSuite() {
+ return suiteDescription;
+ }
+
+ public Description describeTest() {
+ return testDescription;
+ }
+
+ public SetupSection getSetupSection() {
+ return restTestSuite.getSetupSection();
+ }
+
+ public TestSection getTestSection() {
+ return testSection;
+ }
+
+ public long getSeed() {
+ return seed;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/junit/RestTestSuiteRunner.java b/src/test/java/org/elasticsearch/test/rest/junit/RestTestSuiteRunner.java
new file mode 100644
index 0000000..ab0d17b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/junit/RestTestSuiteRunner.java
@@ -0,0 +1,598 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.junit;
+
+import com.carrotsearch.hppc.hash.MurmurHash3;
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.Randomness;
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.TestCluster;
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+import org.elasticsearch.test.rest.client.RestException;
+import org.elasticsearch.test.rest.client.RestResponse;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParser;
+import org.elasticsearch.test.rest.section.DoSection;
+import org.elasticsearch.test.rest.section.ExecutableSection;
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.TestSection;
+import org.elasticsearch.test.rest.spec.RestSpec;
+import org.elasticsearch.test.rest.support.Features;
+import org.elasticsearch.test.rest.support.FileUtils;
+import org.junit.runner.Description;
+import org.junit.runner.notification.Failure;
+import org.junit.runner.notification.RunNotifier;
+import org.junit.runners.ParentRunner;
+import org.junit.runners.model.InitializationError;
+import org.junit.runners.model.Statement;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.regex.Pattern;
+
+import static com.carrotsearch.randomizedtesting.SeedUtils.parseSeedChain;
+import static com.carrotsearch.randomizedtesting.StandaloneRandomizedContext.*;
+import static com.carrotsearch.randomizedtesting.SysGlobals.*;
+import static org.elasticsearch.test.TestCluster.clusterName;
+import static org.elasticsearch.test.rest.junit.DescriptionHelper.*;
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertThat;
+
+/**
+ * JUnit runner for elasticsearch REST tests
+ *
+ * Supports the following options provided as system properties:
+ * - tests.rest[true|false|host:port]: determines whether the REST tests need to be run and, if so,
+ * whether to rely on an external cluster (providing host and port) or to start a test cluster (default)
+ * - tests.rest.suite: comma separated paths of the test suites to be run (by default loaded from /rest-api-spec/test);
+ * it is possible to run only a subset of the tests by providing a directory or a single yaml file
+ * (the default /rest-api-spec/test prefix is optional when files are loaded from the classpath)
+ * - tests.rest.section: regex used to filter the test sections that are going to be run; if provided, only the
+ * section names that match it (case insensitive) will be executed
+ * - tests.rest.spec: REST spec path (default /rest-api-spec/api)
+ * - tests.iters: number of iterations to run for each test section
+ * - tests.seed: seed to base the random behaviours on
+ * - tests.appendseed[true|false]: enables adding the seed to each test section's description (default false)
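+ *
+ * For example, to run a single suite against an externally started node (address and suite are illustrative):
+ * mvn test -Dtests.rest=localhost:9200 -Dtests.rest.suite=indices.open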
+ *
+ */
+public class RestTestSuiteRunner extends ParentRunner<RestTestCandidate> {
+
+ private static final ESLogger logger = Loggers.getLogger(RestTestSuiteRunner.class);
+
+ public static final String REST_TESTS_MODE = "tests.rest";
+ public static final String REST_TESTS_SUITE = "tests.rest.suite";
+ public static final String REST_TESTS_SECTION = "tests.rest.section";
+ public static final String REST_TESTS_SPEC = "tests.rest.spec";
+
+ private static final String DEFAULT_TESTS_PATH = "/rest-api-spec/test";
+ private static final String DEFAULT_SPEC_PATH = "/rest-api-spec/api";
+ private static final int DEFAULT_ITERATIONS = 1;
+
+ private static final String PATHS_SEPARATOR = ",";
+
+ private final RestTestExecutionContext restTestExecutionContext;
+ private final List<RestTestCandidate> restTestCandidates;
+ private final Description rootDescription;
+
+ private final RunMode runMode;
+
+ private final TestCluster testCluster;
+
+ private static final AtomicInteger sequencer = new AtomicInteger();
+
+ /** The runner's seed (master). */
+ private final Randomness runnerRandomness;
+
+ /**
+ * If {@link com.carrotsearch.randomizedtesting.SysGlobals#SYSPROP_RANDOM_SEED} property is used with two arguments
+ * (master:test_section) then this field contains test section level override.
+ */
+ private final Randomness testSectionRandomnessOverride;
+
+ enum RunMode {
+ NO, TEST_CLUSTER, EXTERNAL_CLUSTER
+ }
+
+ static RunMode runMode() {
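+ //tests.rest: true (default) starts a test cluster, false disables the tests, any other value is parsed as a comma separated list of external host:port addresses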
+ String mode = System.getProperty(REST_TESTS_MODE);
+ if (!Strings.hasLength(mode)) {
+ //default true: we run the tests starting our own test cluster
+ mode = Boolean.TRUE.toString();
+ }
+
+ if (Boolean.FALSE.toString().equalsIgnoreCase(mode)) {
+ return RunMode.NO;
+ }
+ if (Boolean.TRUE.toString().equalsIgnoreCase(mode)) {
+ return RunMode.TEST_CLUSTER;
+ }
+ return RunMode.EXTERNAL_CLUSTER;
+ }
+
+ public RestTestSuiteRunner(Class<?> testClass) throws InitializationError {
+ super(testClass);
+
+ this.runMode = runMode();
+
+ if (runMode == RunMode.NO) {
+ //the tests won't be run. the run method will be called anyway but we'll just mark the whole suite as ignored
+ //no need to go ahead and parse the test suites then
+ this.runnerRandomness = null;
+ this.testSectionRandomnessOverride = null;
+ this.restTestExecutionContext = null;
+ this.restTestCandidates = null;
+ this.rootDescription = createRootDescription(getRootSuiteTitle());
+ this.rootDescription.addChild(createApiDescription("empty suite"));
+ this.testCluster = null;
+ return;
+ }
+
+ //the REST test suite is supposed to be run only once per jvm against either an external es node or a self started one
+ if (sequencer.getAndIncrement() > 0) {
+ throw new InitializationError("only one instance of RestTestSuiteRunner can be created per jvm");
+ }
+
+ //either read the seed from system properties (first one in the chain) or generate a new one
+ final String globalSeed = System.getProperty(SYSPROP_RANDOM_SEED());
+ final long initialSeed;
+ Randomness randomnessOverride = null;
+ if (Strings.hasLength(globalSeed)) {
+ final long[] seedChain = parseSeedChain(globalSeed);
+ if (seedChain.length == 0 || seedChain.length > 2) {
+ throw new IllegalArgumentException("Invalid system property "
+ + SYSPROP_RANDOM_SEED() + " specification: " + globalSeed);
+ }
+ if (seedChain.length > 1) {
+ //read the test section level seed if present
+ randomnessOverride = new Randomness(seedChain[1]);
+ }
+ initialSeed = seedChain[0];
+ } else {
+ initialSeed = MurmurHash3.hash(System.nanoTime());
+ }
+ this.runnerRandomness = new Randomness(initialSeed);
+ this.testSectionRandomnessOverride = randomnessOverride;
+ logger.info("Master seed: {}", SeedUtils.formatSeed(initialSeed));
+
+ List<InetSocketAddress> addresses = Lists.newArrayList();
+ if (runMode == RunMode.TEST_CLUSTER) {
+ this.testCluster = new TestCluster(initialSeed, 1, 3,
+ clusterName("REST-tests", ElasticsearchTestCase.CHILD_VM_ID, initialSeed));
+ this.testCluster.beforeTest(runnerRandomness.getRandom(), 0.0f);
+ for (HttpServerTransport httpServerTransport : testCluster.getInstances(HttpServerTransport.class)) {
+ addresses.add(((InetSocketTransportAddress) httpServerTransport.boundAddress().publishAddress()).address());
+ }
+ } else {
+ this.testCluster = null;
+ String testsMode = System.getProperty(REST_TESTS_MODE);
+ String[] stringAddresses = testsMode.split(",");
+ for (String stringAddress : stringAddresses) {
+ String[] split = stringAddress.split(":");
+ if (split.length < 2) {
+ throw new InitializationError("address [" + testsMode + "] not valid");
+ }
+ try {
+ addresses.add(new InetSocketAddress(split[0], Integer.valueOf(split[1])));
+ } catch(NumberFormatException e) {
+ throw new InitializationError("port is not valid, expected number but was [" + split[1] + "]");
+ }
+ }
+ }
+
+ try {
+ String[] specPaths = resolvePathsProperty(REST_TESTS_SPEC, DEFAULT_SPEC_PATH);
+ RestSpec restSpec = RestSpec.parseFrom(DEFAULT_SPEC_PATH, specPaths);
+ this.restTestExecutionContext = new RestTestExecutionContext(addresses.toArray(new InetSocketAddress[addresses.size()]), restSpec);
+ this.rootDescription = createRootDescription(getRootSuiteTitle());
+ this.restTestCandidates = collectTestCandidates(rootDescription);
+ } catch (InitializationError e) {
+ stopTestCluster();
+ throw e;
+ } catch (Throwable e) {
+ stopTestCluster();
+ throw new InitializationError(e);
+ }
+ }
+
+ /**
+ * Parses the test suites and creates the test candidates to be run, together with their junit descriptions.
+ * The descriptions form a tree containing api/yaml file/test section/possibly multiple iterations.
+ * The test candidates are instead flattened out to the leaf level (iterations), the part that actually gets run.
+ */
+ protected List<RestTestCandidate> collectTestCandidates(Description rootDescription) throws InitializationError, IOException {
+
+ String[] paths = resolvePathsProperty(REST_TESTS_SUITE, DEFAULT_TESTS_PATH);
+ Map<String, Set<File>> yamlSuites = FileUtils.findYamlSuites(DEFAULT_TESTS_PATH, paths);
+
+ String sectionFilter = System.getProperty(REST_TESTS_SECTION);
+ Pattern sectionFilterPattern = null;
+ if (Strings.hasLength(sectionFilter)) {
+ sectionFilterPattern = Pattern.compile(sectionFilter, Pattern.CASE_INSENSITIVE);
+ }
+
+ int iterations = determineTestSectionIterationCount();
+ boolean appendSeedParameter = RandomizedTest.systemPropertyAsBoolean(SYSPROP_APPEND_SEED(), false);
+
+ //we iterate over the files and we shuffle them (grouped by api, and by yaml file)
+ //meanwhile we create the junit descriptions and test candidates (one per iteration)
+
+ //yaml suites are grouped by directory (effectively by api)
+ List<String> apis = Lists.newArrayList(yamlSuites.keySet());
+ Collections.shuffle(apis, runnerRandomness.getRandom());
+
+ final boolean fixedSeed = testSectionRandomnessOverride != null;
+ final boolean hasRepetitions = iterations > 1;
+
+ List<Throwable> parseExceptions = Lists.newArrayList();
+ List<RestTestCandidate> testCandidates = Lists.newArrayList();
+ RestTestSuiteParser restTestSuiteParser = new RestTestSuiteParser();
+ for (String api : apis) {
+
+ Description apiDescription = createApiDescription(api);
+
+ List<File> yamlFiles = Lists.newArrayList(yamlSuites.get(api));
+ Collections.shuffle(yamlFiles, runnerRandomness.getRandom());
+
+ for (File yamlFile : yamlFiles) {
+ RestTestSuite restTestSuite;
+ try {
+ restTestSuite = restTestSuiteParser.parse(restTestExecutionContext.esVersion(), api, yamlFile);
+ } catch (RestTestParseException e) {
+ parseExceptions.add(e);
+ //we continue so that we collect all parse errors and show them all at once
+ continue;
+ }
+
+ Description testSuiteDescription = createTestSuiteDescription(restTestSuite);
+
+ if (restTestSuite.getTestSections().size() == 0) {
+ assert restTestSuite.getSetupSection().getSkipSection().skip(restTestExecutionContext.esVersion());
+ testCandidates.add(RestTestCandidate.empty(restTestSuite, testSuiteDescription));
+ continue;
+ }
+
+ Collections.shuffle(restTestSuite.getTestSections(), runnerRandomness.getRandom());
+
+ for (TestSection testSection : restTestSuite.getTestSections()) {
+
+ if (sectionFilterPattern != null) {
+ if (!sectionFilterPattern.matcher(testSection.getName()).find()) {
+ continue;
+ }
+ }
+
+ //no need to generate seed if we are going to skip the test section
+ if (testSection.getSkipSection().skip(restTestExecutionContext.esVersion())) {
+ Description testSectionDescription = createTestSectionIterationDescription(restTestSuite, testSection, null);
+ testSuiteDescription.addChild(testSectionDescription);
+ testCandidates.add(new RestTestCandidate(restTestSuite, testSuiteDescription, testSection, testSectionDescription, -1));
+ continue;
+ }
+
+ Description parentDescription;
+ if (hasRepetitions) {
+ //additional level to group multiple iterations under the same test section's node
+ parentDescription = createTestSectionWithRepetitionsDescription(restTestSuite, testSection);
+ testSuiteDescription.addChild(parentDescription);
+ } else {
+ parentDescription = testSuiteDescription;
+ }
+
+ final long testSectionSeed = determineTestSectionSeed(restTestSuite.getDescription() + "/" + testSection.getName());
+ for (int i = 0; i < iterations; i++) {
+ //test section name argument needs to be unique here
+ long thisSeed = (fixedSeed ? testSectionSeed : testSectionSeed ^ MurmurHash3.hash((long) i));
+
+ final LinkedHashMap<String, Object> args = new LinkedHashMap<String, Object>();
+ if (hasRepetitions) {
+ args.put("#", i);
+ }
+ if (hasRepetitions || appendSeedParameter) {
+ args.put("seed=", SeedUtils.formatSeedChain(runnerRandomness, new Randomness(thisSeed)));
+ }
+
+ Description testSectionDescription = createTestSectionIterationDescription(restTestSuite, testSection, args);
+ parentDescription.addChild(testSectionDescription);
+ testCandidates.add(new RestTestCandidate(restTestSuite, testSuiteDescription, testSection, testSectionDescription, thisSeed));
+ }
+ }
+
+ //we add the suite only if it has at least a section left
+ if (testSuiteDescription.getChildren().size() > 0) {
+ apiDescription.addChild(testSuiteDescription);
+ }
+ }
+
+ //we add the api only if it has at least a suite left
+ if (apiDescription.getChildren().size() > 0) {
+ rootDescription.addChild(apiDescription);
+ }
+ }
+
+ if (!parseExceptions.isEmpty()) {
+ throw new InitializationError(parseExceptions);
+ }
+
+ if (rootDescription.getChildren().size() == 0) {
+ throw new InitializationError("No tests to run");
+ }
+
+ return testCandidates;
+ }
+
+ protected String getRootSuiteTitle() {
+ if (runMode == RunMode.NO) {
+ return "elasticsearch REST Tests - not run";
+ }
+ if (runMode == RunMode.TEST_CLUSTER) {
+ return String.format(Locale.ROOT, "elasticsearch REST Tests - test cluster");
+ }
+ if (runMode == RunMode.EXTERNAL_CLUSTER) {
+ return String.format(Locale.ROOT, "elasticsearch REST Tests - external cluster %s", System.getProperty(REST_TESTS_MODE));
+ }
+ throw new UnsupportedOperationException("runMode [" + runMode + "] not supported");
+ }
+
+ private int determineTestSectionIterationCount() {
+ int iterations = RandomizedTest.systemPropertyAsInt(SYSPROP_ITERATIONS(), DEFAULT_ITERATIONS);
+ if (iterations < 1) {
+ throw new IllegalArgumentException("System property " + SYSPROP_ITERATIONS() + " must be >= 1 but was [" + iterations + "]");
+ }
+ return iterations;
+ }
+
+ protected static String[] resolvePathsProperty(String propertyName, String defaultValue) {
+ String property = System.getProperty(propertyName);
+ if (!Strings.hasLength(property)) {
+ return new String[]{defaultValue};
+ } else {
+ return property.split(PATHS_SEPARATOR);
+ }
+ }
+
+ /**
+ * Determine a given test section's initial random seed
+ */
+ private long determineTestSectionSeed(String testSectionName) {
+ if (testSectionRandomnessOverride != null) {
+ return getSeed(testSectionRandomnessOverride);
+ }
+
+ // We assign each test section a different starting hash based on the global seed
+ // and a hash of their name (so that the order of sections does not matter, only their names)
+ return getSeed(runnerRandomness) ^ MurmurHash3.hash((long) testSectionName.hashCode());
+ }
+
+ @Override
+ protected List<RestTestCandidate> getChildren() {
+ return restTestCandidates;
+ }
+
+ @Override
+ public Description getDescription() {
+ return rootDescription;
+ }
+
+ @Override
+ protected Description describeChild(RestTestCandidate child) {
+ return child.describeTest();
+ }
+
+ @Override
+ protected Statement classBlock(RunNotifier notifier) {
+ //we remove support for @BeforeClass & @AfterClass and JUnit Rules (as we don't call super)
+ Statement statement = childrenInvoker(notifier);
+ statement = withExecutionContextClose(statement);
+ if (testCluster != null) {
+ return withTestClusterClose(statement);
+ }
+ return statement;
+ }
+
+ protected Statement withExecutionContextClose(Statement statement) {
+ return new RunAfter(statement, new Statement() {
+ @Override
+ public void evaluate() throws Throwable {
+ restTestExecutionContext.close();
+ }
+ });
+ }
+
+ protected Statement withTestClusterClose(Statement statement) {
+ return new RunAfter(statement, new Statement() {
+ @Override
+ public void evaluate() throws Throwable {
+ stopTestCluster();
+ }
+ });
+ }
+
+ @Override
+ public void run(final RunNotifier notifier) {
+
+ if (runMode == RunMode.NO) {
+ notifier.fireTestIgnored(rootDescription.getChildren().get(0));
+ return;
+ }
+ final RestReproduceInfoPrinter restReproduceInfoPrinter = new RestReproduceInfoPrinter();
+ notifier.addListener(restReproduceInfoPrinter);
+ try {
+ //the test suite gets run on a separate thread as the randomized context is per thread
+ //once the randomized context is disposed it's not possible to create it again on the same thread
+ final Thread thread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ createRandomizedContext(getTestClass().getJavaClass(), runnerRandomness);
+ RestTestSuiteRunner.super.run(notifier);
+ } finally {
+ disposeRandomizedContext();
+ }
+ }
+ };
+
+ thread.start();
+ try {
+ thread.join();
+ } catch (InterruptedException e) {
+ notifier.fireTestFailure(new Failure(getDescription(),
+ new RuntimeException("Interrupted while waiting for the suite runner? Weird.", e)));
+ }
+ } finally {
+ // remove the listener once the suite is done otherwise it will print
+ // a bogus line if a subsequent test fails that is not a
+ // REST test. The RunNotifier is used across suites!
+ notifier.removeListener(restReproduceInfoPrinter);
+ }
+ }
+
+ @Override
+ protected void runChild(RestTestCandidate testCandidate, RunNotifier notifier) {
+
+ //if the whole suite needs to be skipped, no test sections were loaded, only an empty one that we need to mark as ignored
+ if (testCandidate.getSetupSection().getSkipSection().skip(restTestExecutionContext.esVersion())) {
+ if (logger.isInfoEnabled()) {
+ if (testCandidate.getSetupSection().getSkipSection().isVersionCheck()) {
+ logger.info("skipped test suite [{}]\nreason: {}\nskip versions: {} (current version: {})",
+ testCandidate.getSuiteDescription(), testCandidate.getSetupSection().getSkipSection().getReason(),
+ testCandidate.getSetupSection().getSkipSection().getVersion(), restTestExecutionContext.esVersion());
+ } else {
+ logger.info("skipped test suite [{}]\nreason: feature not supported\nrequired features: {} (supported features: {})",
+ testCandidate.getSuiteDescription(), testCandidate.getSetupSection().getSkipSection().getFeatures(), Features.getSupported());
+ }
+ }
+ notifier.fireTestIgnored(testCandidate.describeSuite());
+ return;
+ }
+
+ //from now on no more empty test candidates are expected
+ assert testCandidate.getTestSection() != null;
+
+ if (testCandidate.getTestSection().getSkipSection().skip(restTestExecutionContext.esVersion())) {
+ if (logger.isInfoEnabled()) {
+ if (testCandidate.getTestSection().getSkipSection().isVersionCheck()) {
+ logger.info("skipped test [{}/{}]\nreason: {}\nskip versions: {} (current version: {})",
+ testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName(),
+ testCandidate.getTestSection().getSkipSection().getReason(),
+ testCandidate.getTestSection().getSkipSection().getVersion(), restTestExecutionContext.esVersion());
+ } else {
+ logger.info("skipped test [{}/{}]\nreason: feature not supported\nrequired features: {} (supported features: {})",
+ testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName(),
+ testCandidate.getTestSection().getSkipSection().getFeatures(), Features.getSupported());
+ }
+ }
+
+ notifier.fireTestIgnored(testCandidate.describeTest());
+ return;
+ }
+
+ runLeaf(methodBlock(testCandidate), testCandidate.describeTest(), notifier);
+ }
+
+ protected Statement methodBlock(final RestTestCandidate testCandidate) {
+ return new Statement() {
+ @Override
+ public void evaluate() throws Throwable {
+ final String testThreadName = "TEST-" + testCandidate.getSuiteDescription() +
+ "." + testCandidate.getTestSection().getName() + "-seed#" + SeedUtils.formatSeedChain(runnerRandomness);
+ // This has a side effect of setting up a nested context for the test thread.
+ final String restoreName = Thread.currentThread().getName();
+ try {
+ Thread.currentThread().setName(testThreadName);
+ pushRandomness(new Randomness(testCandidate.getSeed()));
+ runTestSection(testCandidate);
+ } finally {
+ Thread.currentThread().setName(restoreName);
+ popAndDestroy();
+ }
+ }
+ };
+ }
+
+ protected void runTestSection(RestTestCandidate testCandidate)
+ throws IOException, RestException {
+
+ //let's check that there is something to run, otherwise there might be a problem with the test section
+ if (testCandidate.getTestSection().getExecutableSections().size() == 0) {
+ throw new IllegalArgumentException("No executable sections loaded for ["
+ + testCandidate.getSuiteDescription() + "/" + testCandidate.getTestSection().getName() + "]");
+ }
+
+ logger.info("cleaning up before test [{}: {}]", testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName());
+ tearDown();
+
+ logger.info("start test [{}: {}]", testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName());
+
+ if (!testCandidate.getSetupSection().isEmpty()) {
+ logger.info("start setup test [{}: {}]", testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName());
+ for (DoSection doSection : testCandidate.getSetupSection().getDoSections()) {
+ doSection.execute(restTestExecutionContext);
+ }
+ logger.info("end setup test [{}: {}]", testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName());
+ }
+
+ restTestExecutionContext.clear();
+
+ for (ExecutableSection executableSection : testCandidate.getTestSection().getExecutableSections()) {
+ executableSection.execute(restTestExecutionContext);
+ }
+
+ logger.info("end test [{}: {}]", testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName());
+
+ logger.info("cleaning up after test [{}: {}]", testCandidate.getSuiteDescription(), testCandidate.getTestSection().getName());
+ tearDown();
+ }
+
+ private void tearDown() throws IOException, RestException {
+ wipeIndices();
+ wipeTemplates();
+ restTestExecutionContext.clear();
+ }
+
+ private void wipeIndices() throws IOException, RestException {
+ logger.debug("deleting all indices");
+ RestResponse restResponse = restTestExecutionContext.callApiInternal("indices.delete", "index", "_all");
+ assertThat(restResponse.getStatusCode(), equalTo(200));
+ }
+
+ @SuppressWarnings("unchecked")
+ public void wipeTemplates() throws IOException, RestException {
+ logger.debug("deleting all templates");
+ RestResponse restResponse = restTestExecutionContext.callApiInternal("indices.delete_template", "name", "*");
+ assertThat(restResponse.getStatusCode(), equalTo(200));
+ }
+
+ private void stopTestCluster() {
+ if (runMode == RunMode.TEST_CLUSTER) {
+ assert testCluster != null;
+ testCluster.afterTest();
+ testCluster.close();
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/junit/RunAfter.java b/src/test/java/org/elasticsearch/test/rest/junit/RunAfter.java
new file mode 100644
index 0000000..c786935
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/junit/RunAfter.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.junit;
+
+import com.google.common.collect.Lists;
+import org.junit.runners.model.MultipleFailureException;
+import org.junit.runners.model.Statement;
+
+import java.util.List;
+
+/**
+ * {@link Statement} that allows running a specific statement after another one
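+ * <p>
+ * A minimal usage sketch (statement names are illustrative):
+ * <pre>
+ * Statement wrapped = new RunAfter(testStatement, cleanupStatement);
+ * wrapped.evaluate(); //runs testStatement, then cleanupStatement, collecting failures from both
+ * </pre>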
+ */
+public class RunAfter extends Statement {
+
+ private final Statement next;
+ private final Statement after;
+
+ public RunAfter(Statement next, Statement after) {
+ this.next = next;
+ this.after = after;
+ }
+
+ @Override
+ public void evaluate() throws Throwable {
+ List<Throwable> errors = Lists.newArrayList();
+ try {
+ next.evaluate();
+ } catch (Throwable e) {
+ errors.add(e);
+ } finally {
+ try {
+ after.evaluate();
+ } catch (Throwable e) {
+ errors.add(e);
+ }
+ }
+ MultipleFailureException.assertEmpty(errors);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/DoSectionParser.java b/src/test/java/org/elasticsearch/test/rest/parser/DoSectionParser.java
new file mode 100644
index 0000000..ec5aef5
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/DoSectionParser.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.rest.section.ApiCallSection;
+import org.elasticsearch.test.rest.section.DoSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for do sections
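+ *
+ * A do section typically looks like this (yaml, values illustrative):
+ *
+ * do:
+ *     catch: missing
+ *     get:
+ *         index: test_index
+ *         id: 1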
+ */
+public class DoSectionParser implements RestTestFragmentParser<DoSection> {
+
+ @Override
+ public DoSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+
+ DoSection doSection = new DoSection();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("catch".equals(currentFieldName)) {
+ doSection.setCatch(parser.text());
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if (currentFieldName != null) {
+ ApiCallSection apiCallSection = new ApiCallSection(currentFieldName);
+ String paramName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ paramName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("body".equals(paramName)) {
+ String body = parser.text();
+ XContentType bodyContentType = XContentFactory.xContentType(body);
+ XContentParser bodyParser = XContentFactory.xContent(bodyContentType).createParser(body);
+ //multiple bodies are supported, e.g. in case of a bulk request provided as a whole string
+ while(bodyParser.nextToken() != null) {
+ apiCallSection.addBody(bodyParser.mapOrdered());
+ }
+ } else {
+ apiCallSection.addParam(paramName, parser.text());
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("body".equals(paramName)) {
+ apiCallSection.addBody(parser.mapOrdered());
+ }
+ }
+ }
+ doSection.setApiCallSection(apiCallSection);
+ }
+ }
+ }
+
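+ //move the parser past the do section's closing END_OBJECT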
+ parser.nextToken();
+
+ if (doSection.getApiCallSection() == null) {
+ throw new RestTestParseException("client call section is mandatory within a do section");
+ }
+
+ return doSection;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java b/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java
new file mode 100644
index 0000000..a661221
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.GreaterThanAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for gt assert sections
+ */
+public class GreaterThanParser implements RestTestFragmentParser<GreaterThanAssertion> {
+
+ @Override
+ public GreaterThanAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ if (!(stringObjectTuple.v2() instanceof Comparable)) {
+ throw new RestTestParseException("gt section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName());
+ }
+ return new GreaterThanAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/IsFalseParser.java b/src/test/java/org/elasticsearch/test/rest/parser/IsFalseParser.java
new file mode 100644
index 0000000..81cade6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/IsFalseParser.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.test.rest.section.IsFalseAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for is_false assert sections
+ */
+public class IsFalseParser implements RestTestFragmentParser<IsFalseAssertion> {
+
+ @Override
+ public IsFalseAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ return new IsFalseAssertion(parseContext.parseField());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/IsTrueParser.java b/src/test/java/org/elasticsearch/test/rest/parser/IsTrueParser.java
new file mode 100644
index 0000000..922629b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/IsTrueParser.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.test.rest.section.IsTrueAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for is_true assert sections
+ */
+public class IsTrueParser implements RestTestFragmentParser<IsTrueAssertion> {
+
+ @Override
+ public IsTrueAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ return new IsTrueAssertion(parseContext.parseField());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/LengthParser.java b/src/test/java/org/elasticsearch/test/rest/parser/LengthParser.java
new file mode 100644
index 0000000..414be59
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/LengthParser.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.LengthAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for length assert sections
+ */
+public class LengthParser implements RestTestFragmentParser<LengthAssertion> {
+
+ @Override
+ public LengthAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ assert stringObjectTuple.v2() != null;
+ int value;
+ if (stringObjectTuple.v2() instanceof Number) {
+ value = ((Number) stringObjectTuple.v2()).intValue();
+ } else {
+ try {
+ value = Integer.valueOf(stringObjectTuple.v2().toString());
+ } catch(NumberFormatException e) {
+ throw new RestTestParseException("length is not a valid number", e);
+ }
+ }
+ return new LengthAssertion(stringObjectTuple.v1(), value);
+ }
+}
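
The parser above accepts the expected length either as a YAML integer or as a quoted string. A minimal standalone sketch of the same coercion rules (the class and method names here are illustrative, not part of the suite):

    public class LengthCoercionSketch {

        static int coerceLength(Object raw) {
            if (raw instanceof Number) {
                return ((Number) raw).intValue();          // plain YAML integer, e.g. `length: { hits.hits: 1 }`
            }
            try {
                return Integer.parseInt(raw.toString());   // quoted value, e.g. "1"
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("length is not a valid number", e);
            }
        }

        public static void main(String[] args) {
            System.out.println(coerceLength(3));     // 3
            System.out.println(coerceLength("42"));  // 42
        }
    }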
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/LessThanParser.java b/src/test/java/org/elasticsearch/test/rest/parser/LessThanParser.java
new file mode 100644
index 0000000..065dd19
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/LessThanParser.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.LessThanAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for lt assert sections
+ */
+public class LessThanParser implements RestTestFragmentParser<LessThanAssertion> {
+
+ @Override
+ public LessThanAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+        if (!(stringObjectTuple.v2() instanceof Comparable)) {
+ throw new RestTestParseException("lt section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName());
+ }
+ return new LessThanAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/MatchParser.java b/src/test/java/org/elasticsearch/test/rest/parser/MatchParser.java
new file mode 100644
index 0000000..30ee18a
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/MatchParser.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.MatchAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for match assert sections
+ */
+public class MatchParser implements RestTestFragmentParser<MatchAssertion> {
+
+ @Override
+ public MatchAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ return new MatchAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java b/src/test/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java
new file mode 100644
index 0000000..8d2bd8b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import java.io.IOException;
+
+/**
+ * Base parser for a REST test suite fragment
+ * @param <T> the test fragment's type that gets parsed and returned
+ */
+public interface RestTestFragmentParser<T> {
+
+ /**
+ * Parses a test fragment given the current {@link RestTestSuiteParseContext}
+ */
+ T parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException;
+}
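
Every concrete parser in this package implements this one-method interface. As an illustrative sketch, a hypothetical parser that just reads a single field value off the context would look like this (FieldValueParser is not part of the suite):

    package org.elasticsearch.test.rest.parser;

    import java.io.IOException;

    public class FieldValueParser implements RestTestFragmentParser<String> {

        @Override
        public String parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
            return parseContext.parseField();
        }
    }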
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/RestTestParseException.java b/src/test/java/org/elasticsearch/test/rest/parser/RestTestParseException.java
new file mode 100644
index 0000000..3e1af2c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/RestTestParseException.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+/**
+ * Exception thrown whenever there is a problem parsing any of the REST test suite fragments
+ */
+public class RestTestParseException extends Exception {
+
+ RestTestParseException(String message) {
+ super(message);
+ }
+
+ RestTestParseException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java b/src/test/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java
new file mode 100644
index 0000000..7ff06f4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.TestSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for a complete test section
+ *
+ * Depending on the Elasticsearch version the tests are run against, a test section might need to be skipped.
+ * In that case, parsing of the relevant test section is skipped entirely.
+ */
+public class RestTestSectionParser implements RestTestFragmentParser<TestSection> {
+
+ @Override
+ public TestSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ XContentParser parser = parseContext.parser();
+ parseContext.advanceToFieldName();
+ TestSection testSection = new TestSection(parser.currentName());
+ parser.nextToken();
+ testSection.setSkipSection(parseContext.parseSkipSection());
+
+ boolean skip = testSection.getSkipSection().skip(parseContext.getCurrentVersion());
+
+        while (parser.currentToken() != XContentParser.Token.END_ARRAY) {
+ if (skip) {
+                //the test section is being skipped, so the current token must be
+                // at the beginning of a new object
+ assert parser.currentToken() == XContentParser.Token.START_OBJECT;
+ //we need to be at the beginning of an object to be able to skip children
+ parser.skipChildren();
+ //after skipChildren we are at the end of the skipped object, need to move on
+ parser.nextToken();
+ } else {
+ parseContext.advanceToFieldName();
+ testSection.addExecutableSection(parseContext.parseExecutableSection());
+ }
+ }
+
+ parser.nextToken();
+ assert parser.currentToken() == XContentParser.Token.END_OBJECT;
+ parser.nextToken();
+
+ return testSection;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java b/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java
new file mode 100644
index 0000000..5573264
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.*;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Context shared across the whole test suite parse phase.
+ * Provides shared parse methods and holds the information needed to parse the test sections (e.g. the es version)
+ */
+public class RestTestSuiteParseContext {
+
+ private static final SetupSectionParser SETUP_SECTION_PARSER = new SetupSectionParser();
+ private static final RestTestSectionParser TEST_SECTION_PARSER = new RestTestSectionParser();
+ private static final SkipSectionParser SKIP_SECTION_PARSER = new SkipSectionParser();
+ private static final DoSectionParser DO_SECTION_PARSER = new DoSectionParser();
+ private static final Map<String, RestTestFragmentParser<? extends ExecutableSection>> EXECUTABLE_SECTIONS_PARSERS = Maps.newHashMap();
+ static {
+ EXECUTABLE_SECTIONS_PARSERS.put("do", DO_SECTION_PARSER);
+ EXECUTABLE_SECTIONS_PARSERS.put("set", new SetSectionParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("match", new MatchParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("is_true", new IsTrueParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("is_false", new IsFalseParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("gt", new GreaterThanParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("lt", new LessThanParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("length", new LengthParser());
+ }
+
+ private final String api;
+ private final String suiteName;
+ private final XContentParser parser;
+ private final String currentVersion;
+
+ public RestTestSuiteParseContext(String api, String suiteName, XContentParser parser, String currentVersion) {
+ this.api = api;
+ this.suiteName = suiteName;
+ this.parser = parser;
+ this.currentVersion = currentVersion;
+ }
+
+ public String getApi() {
+ return api;
+ }
+
+ public String getSuiteName() {
+ return suiteName;
+ }
+
+ public XContentParser parser() {
+ return parser;
+ }
+
+ public String getCurrentVersion() {
+ return currentVersion;
+ }
+
+ public SetupSection parseSetupSection() throws IOException, RestTestParseException {
+
+ advanceToFieldName();
+
+ if ("setup".equals(parser.currentName())) {
+ parser.nextToken();
+ SetupSection setupSection = SETUP_SECTION_PARSER.parse(this);
+ parser.nextToken();
+ return setupSection;
+ }
+
+ return SetupSection.EMPTY;
+ }
+
+ public TestSection parseTestSection() throws IOException, RestTestParseException {
+ return TEST_SECTION_PARSER.parse(this);
+ }
+
+ public SkipSection parseSkipSection() throws IOException, RestTestParseException {
+
+ advanceToFieldName();
+
+ if ("skip".equals(parser.currentName())) {
+ SkipSection skipSection = SKIP_SECTION_PARSER.parse(this);
+ parser.nextToken();
+ return skipSection;
+ }
+
+ return SkipSection.EMPTY;
+ }
+
+ public ExecutableSection parseExecutableSection() throws IOException, RestTestParseException {
+ advanceToFieldName();
+ String section = parser.currentName();
+ RestTestFragmentParser<? extends ExecutableSection> execSectionParser = EXECUTABLE_SECTIONS_PARSERS.get(section);
+ if (execSectionParser == null) {
+ throw new RestTestParseException("no parser found for executable section [" + section + "]");
+ }
+ ExecutableSection executableSection = execSectionParser.parse(this);
+ parser.nextToken();
+ return executableSection;
+ }
+
+ public DoSection parseDoSection() throws IOException, RestTestParseException {
+ return DO_SECTION_PARSER.parse(this);
+ }
+
+ public void advanceToFieldName() throws IOException, RestTestParseException {
+ XContentParser.Token token = parser.currentToken();
+ //we are in the beginning, haven't called nextToken yet
+ if (token == null) {
+ token = parser.nextToken();
+ }
+ if (token == XContentParser.Token.START_ARRAY) {
+ token = parser.nextToken();
+ }
+ if (token == XContentParser.Token.START_OBJECT) {
+ token = parser.nextToken();
+ }
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new RestTestParseException("malformed test section: field name expected but found " + token);
+ }
+ }
+
+ public String parseField() throws IOException, RestTestParseException {
+ parser.nextToken();
+ assert parser.currentToken().isValue();
+ String field = parser.text();
+ parser.nextToken();
+ return field;
+ }
+
+ public Tuple<String, Object> parseTuple() throws IOException, RestTestParseException {
+ parser.nextToken();
+ advanceToFieldName();
+ Map<String,Object> map = parser.map();
+ assert parser.currentToken() == XContentParser.Token.END_OBJECT;
+ parser.nextToken();
+
+ if (map.size() != 1) {
+ throw new RestTestParseException("expected key value pair but found " + map.size() + " ");
+ }
+
+ Map.Entry<String, Object> entry = map.entrySet().iterator().next();
+ return Tuple.tuple(entry.getKey(), entry.getValue());
+ }
+}
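
As a usage sketch, the context can drive a single executable section straight from a YAML string. Assuming the ES 1.x XContent.createParser(String) factory, something like the following should hand back a MatchAssertion for get.fields._routing (the api and suite names are placeholders):

    package org.elasticsearch.test.rest.parser;

    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.yaml.YamlXContent;
    import org.elasticsearch.test.rest.section.ExecutableSection;

    public class ParseContextSketch {

        public static void main(String[] args) throws Exception {
            XContentParser parser = YamlXContent.yamlXContent
                    .createParser("match: { get.fields._routing: \"5\" }");
            RestTestSuiteParseContext context =
                    new RestTestSuiteParseContext("api", "suite", parser, "1.0.3");
            //looks up the "match" parser in EXECUTABLE_SECTIONS_PARSERS and delegates to it
            ExecutableSection section = context.parseExecutableSection();
        }
    }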
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java b/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
new file mode 100644
index 0000000..692e9aa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.TestSection;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+
+/**
+ * Parser for a complete test suite (yaml file)
+ *
+ * Depending on the Elasticsearch version the tests are run against, a whole test suite might need to be skipped.
+ * In that case, parsing of the relevant test sections is skipped entirely.
+ */
+public class RestTestSuiteParser implements RestTestFragmentParser<RestTestSuite> {
+
+ public RestTestSuite parse(String currentVersion, String api, File file) throws IOException, RestTestParseException {
+
+ if (!file.isFile()) {
+ throw new IllegalArgumentException(file.getAbsolutePath() + " is not a file");
+ }
+
+ String filename = file.getName();
+ //remove the file extension
+ int i = filename.lastIndexOf('.');
+ if (i > 0) {
+ filename = filename.substring(0, i);
+ }
+
+        //our yaml parser seems to be too tolerant. Each yaml suite must end with \n, otherwise client tests might break.
+ RandomAccessFile randomAccessFile = null;
+ try {
+ randomAccessFile = new RandomAccessFile(file, "r");
+ randomAccessFile.skipBytes((int)randomAccessFile.length() - 1);
+ if (randomAccessFile.read() != 10) {
+ throw new RestTestParseException("test suite [" + api + "/" + filename + "] doesn't end with line feed (\\n)");
+ }
+ } finally {
+ IOUtils.close(randomAccessFile);
+ }
+
+ XContentParser parser = YamlXContent.yamlXContent.createParser(new FileInputStream(file));
+ try {
+ RestTestSuiteParseContext testParseContext = new RestTestSuiteParseContext(api, filename, parser, currentVersion);
+ return parse(testParseContext);
+ } finally {
+ parser.close();
+ }
+ }
+
+ @Override
+ public RestTestSuite parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ XContentParser parser = parseContext.parser();
+
+ parser.nextToken();
+ assert parser.currentToken() == XContentParser.Token.START_OBJECT;
+
+ RestTestSuite restTestSuite = new RestTestSuite(parseContext.getApi(), parseContext.getSuiteName());
+
+ restTestSuite.setSetupSection(parseContext.parseSetupSection());
+
+ boolean skip = restTestSuite.getSetupSection().getSkipSection().skip(parseContext.getCurrentVersion());
+
+ while(true) {
+ //the "---" section separator is not understood by the yaml parser. null is returned, same as when the parser is closed
+ //we need to somehow distinguish between a null in the middle of a test ("---")
+ // and a null at the end of the file (at least two consecutive null tokens)
+ if(parser.currentToken() == null) {
+ if (parser.nextToken() == null) {
+ break;
+ }
+ }
+
+ if (skip) {
+ //if there was a skip section, there was a setup section as well, which means that we are sure
+ // the current token is at the beginning of a new object
+ assert parser.currentToken() == XContentParser.Token.START_OBJECT;
+ //we need to be at the beginning of an object to be able to skip children
+ parser.skipChildren();
+ //after skipChildren we are at the end of the skipped object, need to move on
+ parser.nextToken();
+ } else {
+ TestSection testSection = parseContext.parseTestSection();
+ if (!restTestSuite.addTestSection(testSection)) {
+ throw new RestTestParseException("duplicate test section [" + testSection.getName() + "] found in [" + restTestSuite.getDescription() + "]");
+ }
+ }
+ }
+
+ return restTestSuite;
+ }
+}
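
A usage sketch for the whole suite parser; the api name and file path are illustrative, and the yaml file must end with a line feed as enforced above:

    package org.elasticsearch.test.rest.parser;

    import java.io.File;

    import org.elasticsearch.test.rest.section.RestTestSuite;

    public class SuiteParserSketch {

        public static void main(String[] args) throws Exception {
            RestTestSuite suite = new RestTestSuiteParser()
                    .parse("1.0.3", "index", new File("index/10_with_id.yaml"));
            System.out.println(suite.getDescription());  // index/10_with_id
        }
    }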
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/SetSectionParser.java b/src/test/java/org/elasticsearch/test/rest/parser/SetSectionParser.java
new file mode 100644
index 0000000..8afafc0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/SetSectionParser.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.SetSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for set sections
+ */
+public class SetSectionParser implements RestTestFragmentParser<SetSection> {
+
+ @Override
+ public SetSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+
+ SetSection setSection = new SetSection();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ setSection.addSet(currentFieldName, parser.text());
+ }
+ }
+
+ parser.nextToken();
+
+ if (setSection.getStash().isEmpty()) {
+ throw new RestTestParseException("set section must set at least a value");
+ }
+
+ return setSection;
+ }
+}
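
A sketch of this parser in isolation, fed the body of a `- set: {_scroll_id: scroll_id}` entry; it assumes the same ES 1.x createParser(String) factory as above:

    package org.elasticsearch.test.rest.parser;

    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.yaml.YamlXContent;
    import org.elasticsearch.test.rest.section.SetSection;

    public class SetSectionParserSketch {

        public static void main(String[] args) throws Exception {
            XContentParser parser = YamlXContent.yamlXContent.createParser("{ _scroll_id: scroll_id }");
            SetSection setSection = new SetSectionParser()
                    .parse(new RestTestSuiteParseContext("api", "suite", parser, "1.0.3"));
            //maps the response field _scroll_id to the stash key scroll_id
            System.out.println(setSection.getStash());
        }
    }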
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java b/src/test/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java
new file mode 100644
index 0000000..e036755
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.SetupSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for setup sections
+ */
+public class SetupSectionParser implements RestTestFragmentParser<SetupSection> {
+
+ @Override
+ public SetupSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ SetupSection setupSection = new SetupSection();
+ setupSection.setSkipSection(parseContext.parseSkipSection());
+
+ boolean skip = setupSection.getSkipSection().skip(parseContext.getCurrentVersion());
+
+ while (parser.currentToken() != XContentParser.Token.END_ARRAY) {
+ if (skip) {
+                //the setup section is being skipped, so the current token must be
+                // at the beginning of a new object
+ assert parser.currentToken() == XContentParser.Token.START_OBJECT;
+ //we need to be at the beginning of an object to be able to skip children
+ parser.skipChildren();
+ //after skipChildren we are at the end of the skipped object, need to move on
+ parser.nextToken();
+ } else {
+ parseContext.advanceToFieldName();
+ if (!"do".equals(parser.currentName())) {
+ throw new RestTestParseException("section [" + parser.currentName() + "] not supported within setup section");
+ }
+
+ parser.nextToken();
+ setupSection.addDoSection(parseContext.parseDoSection());
+ parser.nextToken();
+ }
+ }
+
+ parser.nextToken();
+
+ return setupSection;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java b/src/test/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java
new file mode 100644
index 0000000..0a81583
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.SkipSection;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Parser for skip sections
+ */
+public class SkipSectionParser implements RestTestFragmentParser<SkipSection> {
+
+ @Override
+ public SkipSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ String version = null;
+ String reason = null;
+ List<String> features = Lists.newArrayList();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("version".equals(currentFieldName)) {
+ version = parser.text();
+ } else if ("reason".equals(currentFieldName)) {
+ reason = parser.text();
+ } else if ("features".equals(currentFieldName)) {
+ features.add(parser.text());
+                    } else {
+ throw new RestTestParseException("field " + currentFieldName + " not supported within skip section");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("features".equals(currentFieldName)) {
+ while(parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ features.add(parser.text());
+ }
+ }
+ }
+ }
+
+ parser.nextToken();
+
+ if (!Strings.hasLength(version) && features.isEmpty()) {
+ throw new RestTestParseException("version or features is mandatory within skip section");
+ }
+ if (Strings.hasLength(version) && !features.isEmpty()) {
+ throw new RestTestParseException("version or features are mutually exclusive");
+ }
+ if (Strings.hasLength(version) && !Strings.hasLength(reason)) {
+ throw new RestTestParseException("reason is mandatory within skip version section");
+ }
+
+ return new SkipSection(version, features, reason);
+
+ }
+}
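
A sketch of the skip parser over a version-based skip section; the version range and reason are illustrative, and the createParser(String) factory is the same ES 1.x assumption as above:

    package org.elasticsearch.test.rest.parser;

    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.yaml.YamlXContent;
    import org.elasticsearch.test.rest.section.SkipSection;

    public class SkipSectionParserSketch {

        public static void main(String[] args) throws Exception {
            XContentParser parser = YamlXContent.yamlXContent
                    .createParser("{ version: \"0 - 0.90.2\", reason: \"some bug\" }");
            SkipSection skipSection = new SkipSectionParser()
                    .parse(new RestTestSuiteParseContext("api", "suite", parser, "1.0.3"));
        }
    }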
diff --git a/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java b/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java
new file mode 100644
index 0000000..2a49cd4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Represents a test fragment that contains the information needed to call an api
+ */
+public class ApiCallSection {
+
+ private final String api;
+ private final Map<String, String> params = Maps.newHashMap();
+ private final List<Map<String, Object>> bodies = Lists.newArrayList();
+
+ public ApiCallSection(String api) {
+ this.api = api;
+ }
+
+ public String getApi() {
+ return api;
+ }
+
+ public Map<String, String> getParams() {
+ //make sure we never modify the parameters once returned
+ return ImmutableMap.copyOf(params);
+ }
+
+ public void addParam(String key, String value) {
+ String existingValue = params.get(key);
+ if (existingValue != null) {
+ value = Joiner.on(",").join(existingValue, value);
+ }
+ this.params.put(key, value);
+ }
+
+ public List<Map<String, Object>> getBodies() {
+ return ImmutableList.copyOf(bodies);
+ }
+
+ public void addBody(Map<String, Object> body) {
+ this.bodies.add(body);
+ }
+
+ public boolean hasBody() {
+ return bodies.size() > 0;
+ }
+}
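
Note that addParam folds repeated keys into one comma-separated value, which is how multi-valued REST parameters (e.g. several indices) are sent. A short same-package sketch:

    package org.elasticsearch.test.rest.section;

    public class ApiCallSectionSketch {

        public static void main(String[] args) {
            ApiCallSection call = new ApiCallSection("search");
            call.addParam("index", "test_1");
            call.addParam("index", "test_2");
            System.out.println(call.getParams().get("index"));  // test_1,test_2
        }
    }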
diff --git a/src/test/java/org/elasticsearch/test/rest/section/Assertion.java b/src/test/java/org/elasticsearch/test/rest/section/Assertion.java
new file mode 100644
index 0000000..4639732
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/Assertion.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Base class for executable sections that hold assertions
+ */
+public abstract class Assertion implements ExecutableSection {
+
+ private final String field;
+ private final Object expectedValue;
+
+ protected Assertion(String field, Object expectedValue) {
+ this.field = field;
+ this.expectedValue = expectedValue;
+ }
+
+ public final String getField() {
+ return field;
+ }
+
+ public final Object getExpectedValue() {
+ return expectedValue;
+ }
+
+ protected final Object resolveExpectedValue(RestTestExecutionContext executionContext) throws IOException {
+ if (expectedValue instanceof Map) {
+ @SuppressWarnings("unchecked")
+ Map<String, Object> map = (Map<String, Object>) expectedValue;
+ return executionContext.stash().unstashMap(map);
+ }
+
+ if (executionContext.stash().isStashedValue(expectedValue)) {
+ return executionContext.stash().unstashValue(expectedValue.toString());
+ }
+ return expectedValue;
+ }
+
+ protected final Object getActualValue(RestTestExecutionContext executionContext) throws IOException {
+ if (executionContext.stash().isStashedValue(field)) {
+ return executionContext.stash().unstashValue(field);
+ }
+ return executionContext.response(field);
+ }
+
+ @Override
+ public final void execute(RestTestExecutionContext executionContext) throws IOException {
+ doAssert(getActualValue(executionContext), resolveExpectedValue(executionContext));
+ }
+
+ /**
+ * Executes the assertion comparing the actual value (parsed from the response) with the expected one
+ */
+ protected abstract void doAssert(Object actualValue, Object expectedValue);
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/DoSection.java b/src/test/java/org/elasticsearch/test/rest/section/DoSection.java
new file mode 100644
index 0000000..3ea1ca0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/DoSection.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+import org.elasticsearch.test.rest.client.RestException;
+import org.elasticsearch.test.rest.client.RestResponse;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.collect.Tuple.tuple;
+import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+/**
+ * Represents a do section:
+ *
+ * - do:
+ * catch: missing
+ * update:
+ * index: test_1
+ * type: test
+ * id: 1
+ * body: { doc: { foo: bar } }
+ *
+ */
+public class DoSection implements ExecutableSection {
+
+ private static final ESLogger logger = Loggers.getLogger(DoSection.class);
+
+ private String catchParam;
+ private ApiCallSection apiCallSection;
+
+ public String getCatch() {
+ return catchParam;
+ }
+
+ public void setCatch(String catchParam) {
+ this.catchParam = catchParam;
+ }
+
+ public ApiCallSection getApiCallSection() {
+ return apiCallSection;
+ }
+
+ public void setApiCallSection(ApiCallSection apiCallSection) {
+ this.apiCallSection = apiCallSection;
+ }
+
+ @Override
+ public void execute(RestTestExecutionContext executionContext) throws IOException {
+
+ if ("param".equals(catchParam)) {
+            //the client should throw a validation error before sending the request
+            //let's just return without doing anything, as we don't have any client to test here
+ logger.info("found [catch: param], no request sent");
+ return;
+ }
+
+ try {
+ RestResponse restResponse = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(), apiCallSection.getBodies());
+ if (Strings.hasLength(catchParam)) {
+ String catchStatusCode;
+ if (catches.containsKey(catchParam)) {
+ catchStatusCode = catches.get(catchParam).v1();
+ } else if (catchParam.startsWith("/") && catchParam.endsWith("/")) {
+ catchStatusCode = "4xx|5xx";
+ } else {
+ throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported");
+ }
+ fail(formatStatusCodeMessage(restResponse, catchStatusCode));
+ }
+ } catch(RestException e) {
+ if (!Strings.hasLength(catchParam)) {
+ fail(formatStatusCodeMessage(e.restResponse(), "2xx"));
+ } else if (catches.containsKey(catchParam)) {
+ assertStatusCode(e.restResponse());
+ } else if (catchParam.length() > 2 && catchParam.startsWith("/") && catchParam.endsWith("/")) {
+                //the text of the error message must match the regular expression
+ assertThat(formatStatusCodeMessage(e.restResponse(), "4xx|5xx"), e.statusCode(), greaterThanOrEqualTo(400));
+ Object error = executionContext.response("error");
+ assertThat("error was expected in the response", error, notNullValue());
+ //remove delimiters from regex
+ String regex = catchParam.substring(1, catchParam.length() - 1);
+ assertThat("the error message was expected to match the provided regex but didn't",
+ error.toString(), matches(regex));
+ } else {
+ throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported");
+ }
+ }
+ }
+
+ private void assertStatusCode(RestResponse restResponse) {
+ Tuple<String, org.hamcrest.Matcher<Integer>> stringMatcherTuple = catches.get(catchParam);
+ assertThat(formatStatusCodeMessage(restResponse, stringMatcherTuple.v1()),
+ restResponse.getStatusCode(), stringMatcherTuple.v2());
+ }
+
+ private String formatStatusCodeMessage(RestResponse restResponse, String expected) {
+ return "expected [" + expected + "] status code but api [" + apiCallSection.getApi() + "] returned ["
+ + restResponse.getStatusCode() + " " + restResponse.getReasonPhrase() + "] [" + restResponse.getBody() + "]";
+ }
+
+ private static Map<String, Tuple<String, org.hamcrest.Matcher<Integer>>> catches = Maps.newHashMap();
+
+ static {
+ catches.put("missing", tuple("404", equalTo(404)));
+ catches.put("conflict", tuple("409", equalTo(409)));
+ catches.put("forbidden", tuple("403", equalTo(403)));
+ catches.put("request", tuple("4xx|5xx", greaterThanOrEqualTo(400)));
+ }
+}
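
The catches table above maps each supported catch value to the status code it expects. Since `catch: param` is handled before any request goes out, a do section carrying it can execute without a client; a minimal sketch:

    package org.elasticsearch.test.rest.section;

    public class DoSectionSketch {

        public static void main(String[] args) throws Exception {
            DoSection doSection = new DoSection();
            doSection.setCatch("param");
            //logs and returns immediately; no api call section or execution context is needed
            doSection.execute(null);
        }
    }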
diff --git a/src/test/java/org/elasticsearch/test/rest/section/ExecutableSection.java b/src/test/java/org/elasticsearch/test/rest/section/ExecutableSection.java
new file mode 100644
index 0000000..669d82c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/ExecutableSection.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+
+import java.io.IOException;
+
+/**
+ * Represents a test fragment that can be executed (e.g. api call, assertion)
+ */
+public interface ExecutableSection {
+
+ /**
+ * Executes the section passing in the execution context
+ */
+ void execute(RestTestExecutionContext executionContext) throws IOException;
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java b/src/test/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java
new file mode 100644
index 0000000..f79dd18
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents a gt assert section:
+ *
+ * - gt: { fields._ttl: 0}
+ *
+ */
+public class GreaterThanAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(GreaterThanAssertion.class);
+
+ public GreaterThanAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] is greater than [{}]", actualValue, expectedValue);
+ assertThat(actualValue, instanceOf(Comparable.class));
+ assertThat(expectedValue, instanceOf(Comparable.class));
+ assertThat(errorMessage(), (Comparable)actualValue, greaterThan((Comparable) expectedValue));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] is not greater than [" + getExpectedValue() + "]";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java b/src/test/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java
new file mode 100644
index 0000000..056db99
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents an is_false assert section:
+ *
+ * - is_false: get.fields.bar
+ *
+ */
+public class IsFalseAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(IsFalseAssertion.class);
+
+ public IsFalseAssertion(String field) {
+ super(field, false);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] doesn't have a true value", actualValue);
+
+ if (actualValue == null) {
+ return;
+ }
+
+ String actualString = actualValue.toString();
+ assertThat(errorMessage(), actualString, anyOf(
+ equalTo(""),
+ equalToIgnoringCase(Boolean.FALSE.toString()),
+ equalTo("0")
+ ));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] has a true value but it shouldn't";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java b/src/test/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java
new file mode 100644
index 0000000..6518276
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents an is_true assert section:
+ *
+ * - is_true: get.fields.bar
+ *
+ */
+public class IsTrueAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(IsTrueAssertion.class);
+
+ public IsTrueAssertion(String field) {
+ super(field, true);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] has a true value", actualValue);
+ String errorMessage = errorMessage();
+ assertThat(errorMessage, actualValue, notNullValue());
+ String actualString = actualValue.toString();
+ assertThat(errorMessage, actualString, not(equalTo("")));
+ assertThat(errorMessage, actualString, not(equalToIgnoringCase(Boolean.FALSE.toString())));
+ assertThat(errorMessage, actualString, not(equalTo("0")));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] doesn't have a true value";
+ }
+}
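
Taken together with IsFalseAssertion, the truthiness convention is: null, the empty string, "false" (any case) and "0" count as false, everything else as true. Since doAssert is protected, a same-package sketch can exercise both directly:

    package org.elasticsearch.test.rest.section;

    public class TruthinessSketch {

        public static void main(String[] args) {
            new IsTrueAssertion("get.fields.bar").doAssert("bar", true);  // passes
            new IsFalseAssertion("get.fields.bar").doAssert("0", false);  // passes: "0" counts as false
        }
    }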
diff --git a/src/test/java/org/elasticsearch/test/rest/section/LengthAssertion.java b/src/test/java/org/elasticsearch/test/rest/section/LengthAssertion.java
new file mode 100644
index 0000000..b13cca1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/LengthAssertion.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents a length assert section:
+ *
+ * - length: { hits.hits: 1 }
+ *
+ */
+public class LengthAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(LengthAssertion.class);
+
+ public LengthAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] has length [{}]", actualValue, expectedValue);
+ assertThat(expectedValue, instanceOf(Number.class));
+ int length = ((Number) expectedValue).intValue();
+ if (actualValue instanceof String) {
+ assertThat(errorMessage(), ((String) actualValue).length(), equalTo(length));
+ } else if (actualValue instanceof List) {
+ assertThat(errorMessage(), ((List) actualValue).size(), equalTo(length));
+ } else if (actualValue instanceof Map) {
+ assertThat(errorMessage(), ((Map) actualValue).keySet().size(), equalTo(length));
+ } else {
+ throw new UnsupportedOperationException("value is of unsupported type [" + actualValue.getClass().getSimpleName() + "]");
+ }
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] doesn't have length [" + getExpectedValue() + "]";
+ }
+}
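
The same length check covers strings, lists and maps. A same-package sketch (the class name is illustrative):

    package org.elasticsearch.test.rest.section;

    import java.util.Arrays;

    public class LengthAssertionSketch {

        public static void main(String[] args) {
            LengthAssertion assertion = new LengthAssertion("hits.hits", 2);
            assertion.doAssert(Arrays.asList("a", "b"), 2);  // list size
            assertion.doAssert("ab", 2);                     // string length
        }
    }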
diff --git a/src/test/java/org/elasticsearch/test/rest/section/LessThanAssertion.java b/src/test/java/org/elasticsearch/test/rest/section/LessThanAssertion.java
new file mode 100644
index 0000000..52c882d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/LessThanAssertion.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.lessThan;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents a lt assert section:
+ *
+ * - lt: { fields._ttl: 20000}
+ *
+ */
+public class LessThanAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(LessThanAssertion.class);
+
+ public LessThanAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] is less than [{}]", actualValue, expectedValue);
+ assertThat(actualValue, instanceOf(Comparable.class));
+ assertThat(expectedValue, instanceOf(Comparable.class));
+ assertThat(errorMessage(), (Comparable)actualValue, lessThan((Comparable)expectedValue));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] is not less than [" + getExpectedValue() + "]";
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/MatchAssertion.java b/src/test/java/org/elasticsearch/test/rest/section/MatchAssertion.java
new file mode 100644
index 0000000..8b85104
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/MatchAssertion.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents a match assert section:
+ *
+ * - match: { get.fields._routing: "5" }
+ *
+ */
+public class MatchAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(MatchAssertion.class);
+
+ public MatchAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+
+        //if the value is wrapped in forward slashes it is a regexp (e.g. /s+d+/)
+ if (expectedValue instanceof String) {
+ String expValue = ((String) expectedValue).trim();
+ if (expValue.length() > 2 && expValue.startsWith("/") && expValue.endsWith("/")) {
+ String regex = expValue.substring(1, expValue.length() - 1);
+ logger.trace("assert that [{}] matches [{}]", actualValue, regex);
+ assertThat("field [" + getField() + "] was expected to match the provided regex but didn't",
+ actualValue.toString(), matches(regex, Pattern.COMMENTS));
+ return;
+ }
+ }
+
+ assertThat(errorMessage(), actualValue, notNullValue());
+ logger.trace("assert that [{}] matches [{}]", actualValue, expectedValue);
+ if (!actualValue.getClass().equals(expectedValue.getClass())) {
+ if (actualValue instanceof Number && expectedValue instanceof Number) {
+ //Double 1.0 is equal to Integer 1
+ assertThat(errorMessage(), ((Number) actualValue).doubleValue(), equalTo(((Number) expectedValue).doubleValue()));
+ return;
+ }
+ }
+
+ assertThat(errorMessage(), actualValue, equalTo(expectedValue));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] doesn't match the expected value";
+ }
+}
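
The numeric branch means a YAML integer can match a JSON floating point number of the same value. A same-package sketch:

    package org.elasticsearch.test.rest.section;

    public class MatchAssertionSketch {

        public static void main(String[] args) {
            MatchAssertion assertion = new MatchAssertion("hits.total", 1);
            assertion.doAssert(1.0d, 1);  // Double 1.0 matches Integer 1 via doubleValue()
        }
    }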
diff --git a/src/test/java/org/elasticsearch/test/rest/section/RestTestSuite.java b/src/test/java/org/elasticsearch/test/rest/section/RestTestSuite.java
new file mode 100644
index 0000000..5737e1e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/RestTestSuite.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Holds a REST test suite loaded from a specific yaml file.
+ * Supports a setup section and multiple test sections.
+ */
+public class RestTestSuite {
+
+ private final String api;
+ private final String name;
+
+ private SetupSection setupSection;
+
+ private Set<TestSection> testSections = Sets.newHashSet();
+
+ public RestTestSuite(String api, String name) {
+ this.api = api;
+ this.name = name;
+ }
+
+ public String getApi() {
+ return api;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ //describes the rest test suite (e.g. index/10_with_id)
+ //also useful for reproducing failures (RestReproduceInfoPrinter)
+ public String getDescription() {
+ return api + "/" + name;
+ }
+
+ public SetupSection getSetupSection() {
+ return setupSection;
+ }
+
+ public void setSetupSection(SetupSection setupSection) {
+ this.setupSection = setupSection;
+ }
+
+ /**
+ * Adds a {@link org.elasticsearch.test.rest.section.TestSection} to the REST suite
+ * @return true if the test section was not already present, false otherwise
+ */
+ public boolean addTestSection(TestSection testSection) {
+ return this.testSections.add(testSection);
+ }
+
+ public List<TestSection> getTestSections() {
+ return Lists.newArrayList(testSections);
+ }
+}
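Because testSections is a HashSet and TestSection (further below) defines equality by name only, adding a second section with the same name is rejected. A minimal sketch with illustrative names:

    RestTestSuite suite = new RestTestSuite("index", "10_with_id");
    suite.addTestSection(new TestSection("Basic"));  // true: newly added
    suite.addTestSection(new TestSection("Basic"));  // false: a section with that name already exists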
diff --git a/src/test/java/org/elasticsearch/test/rest/section/SetSection.java b/src/test/java/org/elasticsearch/test/rest/section/SetSection.java
new file mode 100644
index 0000000..0a52a77
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/SetSection.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Represents a set section:
+ *
+ * - set: {_scroll_id: scroll_id}
+ *
+ */
+public class SetSection implements ExecutableSection {
+
+ private Map<String, String> stash = Maps.newHashMap();
+
+ public void addSet(String responseField, String stashedField) {
+ stash.put(responseField, stashedField);
+ }
+
+ public Map<String, String> getStash() {
+ return stash;
+ }
+
+ @Override
+ public void execute(RestTestExecutionContext executionContext) throws IOException {
+ for (Map.Entry<String, String> entry : stash.entrySet()) {
+ Object actualValue = executionContext.response(entry.getKey());
+ executionContext.stash().stashValue(entry.getValue(), actualValue);
+ }
+ }
+}
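A minimal sketch of the stash round-trip performed by execute(), mirroring the javadoc example above:

    SetSection setSection = new SetSection();
    setSection.addSet("_scroll_id", "scroll_id");
    // on execute(), the value found at "_scroll_id" in the last response is
    // stashed under the key "scroll_id", where later executable sections can
    // look it up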
diff --git a/src/test/java/org/elasticsearch/test/rest/section/SetupSection.java b/src/test/java/org/elasticsearch/test/rest/section/SetupSection.java
new file mode 100644
index 0000000..72f653e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/SetupSection.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+
+/**
+ * Represents a setup section. Holds a skip section and multiple do sections.
+ */
+public class SetupSection {
+
+ public static final SetupSection EMPTY;
+
+ static {
+ EMPTY = new SetupSection();
+ EMPTY.setSkipSection(SkipSection.EMPTY);
+ }
+
+ private SkipSection skipSection;
+
+ private List<DoSection> doSections = Lists.newArrayList();
+
+ public SkipSection getSkipSection() {
+ return skipSection;
+ }
+
+ public void setSkipSection(SkipSection skipSection) {
+ this.skipSection = skipSection;
+ }
+
+ public List<DoSection> getDoSections() {
+ return doSections;
+ }
+
+ public void addDoSection(DoSection doSection) {
+ this.doSections.add(doSection);
+ }
+
+ public boolean isEmpty() {
+ return EMPTY.equals(this);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java b/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java
new file mode 100644
index 0000000..9e2f5d6
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.test.rest.support.Features;
+import org.elasticsearch.test.rest.support.VersionUtils;
+
+import java.util.List;
+
+/**
+ * Represents a skip section that tells whether a specific test section or suite needs to be skipped
+ * based on:
+ * - the elasticsearch version the tests are running against
+ * - a required test feature that the runner might not have implemented yet
+ */
+public class SkipSection {
+
+ public static final SkipSection EMPTY = new SkipSection("", Lists.<String>newArrayList(), "");
+
+ private final String version;
+ private final List<String> features;
+ private final String reason;
+
+ public SkipSection(String version, List<String> features, String reason) {
+ this.version = version;
+ this.features = features;
+ this.reason = reason;
+ }
+
+ public String getVersion() {
+ return version;
+ }
+
+ public List<String> getFeatures() {
+ return features;
+ }
+
+ public String getReason() {
+ return reason;
+ }
+
+ public boolean skip(String currentVersion) {
+ if (isEmpty()) {
+ return false;
+ }
+
+ if (Strings.hasLength(version)) {
+ return VersionUtils.skipCurrentVersion(version, currentVersion);
+ }
+
+ if (features != null && !this.features.isEmpty()) {
+ return !Features.areAllSupported(this.features);
+ }
+
+ throw new IllegalArgumentException("either version or features must be set in a non-empty skip section");
+ }
+
+ public boolean isVersionCheck() {
+ return Strings.hasLength(version);
+ }
+
+ public boolean isEmpty() {
+ return EMPTY.equals(this);
+ }
+}
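A minimal sketch of the two skip modes; the version range and feature name are illustrative:

    SkipSection versionSkip = new SkipSection("0.90.0 - 0.90.9",
            Lists.<String>newArrayList(), "bug fixed in 0.90.10");
    versionSkip.skip("0.90.7");  // true: 0.90.7 falls inside the range
    versionSkip.skip("1.0.0");   // false: above the upper bound

    SkipSection featureSkip = new SkipSection(null,
            Lists.newArrayList("regex"), "runner lacks the regex feature");
    // featureSkip.skip(...) returns true unless Features.areAllSupported()
    // reports the "regex" feature as supported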
diff --git a/src/test/java/org/elasticsearch/test/rest/section/TestSection.java b/src/test/java/org/elasticsearch/test/rest/section/TestSection.java
new file mode 100644
index 0000000..3386b9e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/section/TestSection.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+
+/**
+ * Represents a test section, which is composed of a skip section and multiple executable sections.
+ */
+public class TestSection {
+ private final String name;
+ private SkipSection skipSection;
+ private final List<ExecutableSection> executableSections;
+
+ public TestSection(String name) {
+ this.name = name;
+ this.executableSections = Lists.newArrayList();
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public SkipSection getSkipSection() {
+ return skipSection;
+ }
+
+ public void setSkipSection(SkipSection skipSection) {
+ this.skipSection = skipSection;
+ }
+
+ public List<ExecutableSection> getExecutableSections() {
+ return executableSections;
+ }
+
+ public void addExecutableSection(ExecutableSection executableSection) {
+ this.executableSections.add(executableSection);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ TestSection that = (TestSection) o;
+
+ if (name != null ? !name.equals(that.name) : that.name != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return name != null ? name.hashCode() : 0;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/spec/RestApi.java b/src/test/java/org/elasticsearch/test/rest/spec/RestApi.java
new file mode 100644
index 0000000..0996df4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/spec/RestApi.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.spec;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Represents an elasticsearch REST endpoint (api)
+ */
+public class RestApi {
+
+ private final String name;
+ private List<String> methods = Lists.newArrayList();
+ private List<String> paths = Lists.newArrayList();
+ private List<String> pathParts = Lists.newArrayList();
+ private List<String> params = Lists.newArrayList();
+ private BODY body = BODY.NOT_SUPPORTED;
+
+ public enum BODY {
+ NOT_SUPPORTED, OPTIONAL, REQUIRED
+ }
+
+ RestApi(String name) {
+ this.name = name;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public List<String> getMethods() {
+ return methods;
+ }
+
+ /**
+ * Returns the supported http methods given the rest parameters provided
+ */
+ public List<String> getSupportedMethods(Set<String> restParams) {
+ //we try to avoid hardcoded mappings but the index and create apis are the exception
+ if ("index".equals(name) || "create".equals(name)) {
+ List<String> indexMethods = Lists.newArrayList();
+ for (String method : methods) {
+ if (restParams.contains("id")) {
+ //PUT when the id is provided
+ if (HttpPut.METHOD_NAME.equals(method)) {
+ indexMethods.add(method);
+ }
+ } else {
+ //POST without id
+ if (HttpPost.METHOD_NAME.equals(method)) {
+ indexMethods.add(method);
+ }
+ }
+ }
+ return indexMethods;
+ }
+
+ return methods;
+ }
+
+ void addMethod(String method) {
+ this.methods.add(method);
+ }
+
+ public List<String> getPaths() {
+ return paths;
+ }
+
+ void addPath(String path) {
+ this.paths.add(path);
+ }
+
+ public List<String> getPathParts() {
+ return pathParts;
+ }
+
+ void addPathPart(String pathPart) {
+ this.pathParts.add(pathPart);
+ }
+
+ public List<String> getParams() {
+ return params;
+ }
+
+ void addParam(String param) {
+ this.params.add(param);
+ }
+
+ void setBodyOptional() {
+ this.body = BODY.OPTIONAL;
+ }
+
+ void setBodyRequired() {
+ this.body = BODY.REQUIRED;
+ }
+
+ public boolean isBodySupported() {
+ return body != BODY.NOT_SUPPORTED;
+ }
+
+ public boolean isBodyRequired() {
+ return body == BODY.REQUIRED;
+ }
+
+ /**
+ * Finds the best matching rest path given the current parameters and replaces
+ * placeholders with their corresponding values received as arguments
+ */
+ public String[] getFinalPaths(Map<String, String> pathParams) {
+
+ List<RestPath> matchingRestPaths = findMatchingRestPaths(pathParams.keySet());
+ if (matchingRestPaths == null || matchingRestPaths.isEmpty()) {
+ throw new IllegalArgumentException("unable to find matching rest path for api [" + name + "] and path params " + pathParams);
+ }
+
+ String[] paths = new String[matchingRestPaths.size()];
+ for (int i = 0; i < matchingRestPaths.size(); i++) {
+ RestPath restPath = matchingRestPaths.get(i);
+ String path = restPath.path;
+ for (Map.Entry<String, String> paramEntry : restPath.parts.entrySet()) {
+ // replace path placeholders with actual values
+ String value = pathParams.get(paramEntry.getValue());
+ if (value == null) {
+ throw new IllegalArgumentException("parameter [" + paramEntry.getValue() + "] missing");
+ }
+ path = path.replace(paramEntry.getKey(), value);
+ }
+ paths[i] = path;
+ }
+ return paths;
+ }
+
+ /**
+ * Finds the matching rest paths out of the available ones with the current api (based on REST spec).
+ *
+ * The best path is the one that has exactly the same number of placeholders to replace
+ * (e.g. /{index}/{type}/{id} when the path params are exactly index, type and id).
+ */
+ private List<RestPath> findMatchingRestPaths(Set<String> restParams) {
+
+ List<RestPath> matchingRestPaths = Lists.newArrayList();
+ RestPath[] restPaths = buildRestPaths();
+
+ for (RestPath restPath : restPaths) {
+ if (restPath.parts.size() == restParams.size()) {
+ if (restPath.parts.values().containsAll(restParams)) {
+ matchingRestPaths.add(restPath);
+ }
+ }
+ }
+
+ return matchingRestPaths;
+ }
+
+ private RestPath[] buildRestPaths() {
+ RestPath[] restPaths = new RestPath[paths.size()];
+ for (int i = 0; i < restPaths.length; i++) {
+ restPaths[i] = new RestPath(paths.get(i));
+ }
+ return restPaths;
+ }
+
+ private static class RestPath {
+ private static final Pattern PLACEHOLDERS_PATTERN = Pattern.compile("(\\{(.*?)})");
+
+ final String path;
+ //contains param to replace (e.g. {index}) and param key to use for lookup in the current values map (e.g. index)
+ final Map<String, String> parts;
+
+ RestPath(String path) {
+ this.path = path;
+ this.parts = extractParts(path);
+ }
+
+ private static Map<String,String> extractParts(String input) {
+ Map<String, String> parts = Maps.newHashMap();
+ Matcher matcher = PLACEHOLDERS_PATTERN.matcher(input);
+ while (matcher.find()) {
+ //key is e.g. {index}
+ String key = input.substring(matcher.start(), matcher.end());
+ if (matcher.groupCount() != 2) {
+ throw new IllegalArgumentException("no lookup key found for param [" + key + "]");
+ }
+ //to be replaced with current value found with key e.g. index
+ String value = matcher.group(2);
+ parts.put(key, value);
+ }
+ return parts;
+ }
+ }
+}
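A minimal sketch of placeholder replacement in getFinalPaths(); the api name and path are illustrative, and the setters are package-private so this only compiles from the same package:

    RestApi api = new RestApi("get");
    api.addMethod("GET");
    api.addPath("/{index}/{type}/{id}");
    Map<String, String> pathParams = Maps.newHashMap();
    pathParams.put("index", "test_index");
    pathParams.put("type", "test_type");
    pathParams.put("id", "1");
    String[] finalPaths = api.getFinalPaths(pathParams);
    // finalPaths[0] is "/test_index/test_type/1": the path whose placeholders
    // exactly match the provided params is selected, then each {placeholder}
    // is replaced with its value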
diff --git a/src/test/java/org/elasticsearch/test/rest/spec/RestApiParser.java b/src/test/java/org/elasticsearch/test/rest/spec/RestApiParser.java
new file mode 100644
index 0000000..2c3dc45
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/spec/RestApiParser.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.spec;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * Parser for a REST api spec (single json file)
+ */
+public class RestApiParser {
+
+ public RestApi parse(XContentParser parser) throws IOException {
+
+ try {
+ while (parser.nextToken() != XContentParser.Token.FIELD_NAME) {
+ //move to first field name
+ }
+
+ RestApi restApi = new RestApi(parser.currentName());
+
+ int level = -1;
+ while (parser.nextToken() != XContentParser.Token.END_OBJECT || level >= 0) {
+
+ if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
+ if ("methods".equals(parser.currentName())) {
+ parser.nextToken();
+ while (parser.nextToken() == XContentParser.Token.VALUE_STRING) {
+ restApi.addMethod(parser.text());
+ }
+ }
+
+ if ("url".equals(parser.currentName())) {
+ String currentFieldName = "url";
+ int innerLevel = -1;
+ while (parser.nextToken() != XContentParser.Token.END_OBJECT || innerLevel >= 0) {
+ if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_ARRAY && "paths".equals(currentFieldName)) {
+ while (parser.nextToken() == XContentParser.Token.VALUE_STRING) {
+ restApi.addPath(parser.text());
+ }
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT && "parts".equals(currentFieldName)) {
+ while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
+ restApi.addPathPart(parser.currentName());
+ parser.nextToken();
+ if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+ throw new IOException("Expected parts field in rest api definition to contain an object");
+ }
+ parser.skipChildren();
+ }
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT && "params".equals(currentFieldName)) {
+ while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
+ restApi.addParam(parser.currentName());
+ parser.nextToken();
+ if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+ throw new IOException("Expected params field in rest api definition to contain an object");
+ }
+ parser.skipChildren();
+ }
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ innerLevel++;
+ }
+ if (parser.currentToken() == XContentParser.Token.END_OBJECT) {
+ innerLevel--;
+ }
+ }
+ }
+
+ if ("body".equals(parser.currentName())) {
+ parser.nextToken();
+ if (parser.currentToken() != XContentParser.Token.VALUE_NULL) {
+ boolean requiredFound = false;
+ while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+ if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
+ if ("required".equals(parser.currentName())) {
+ requiredFound = true;
+ parser.nextToken();
+ if (parser.booleanValue()) {
+ restApi.setBodyRequired();
+ } else {
+ restApi.setBodyOptional();
+ }
+ }
+ }
+ }
+ if (!requiredFound) {
+ restApi.setBodyOptional();
+ }
+ }
+ }
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ level++;
+ }
+ if (parser.currentToken() == XContentParser.Token.END_OBJECT) {
+ level--;
+ }
+
+ }
+
+ parser.nextToken();
+ assert parser.currentToken() == XContentParser.Token.END_OBJECT;
+ parser.nextToken();
+
+ return restApi;
+
+ } finally {
+ parser.close();
+ }
+ }
+
+}
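A minimal sketch of the parser on a hypothetical spec fragment; the JSON is illustrative and far smaller than a real spec file:

    String spec = "{ \"get\": { \"methods\": [\"GET\"],"
            + " \"url\": { \"paths\": [\"/{index}/{type}/{id}\"],"
            + " \"parts\": { \"index\": {} }, \"params\": { \"realtime\": {} } },"
            + " \"body\": null } }";
    RestApi restApi = new RestApiParser().parse(JsonXContent.jsonXContent.createParser(spec));
    // restApi.getName() is "get"; methods, paths, parts and params are
    // collected from the corresponding sections, and the null body leaves
    // the api marked as BODY.NOT_SUPPORTED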
diff --git a/src/test/java/org/elasticsearch/test/rest/spec/RestSpec.java b/src/test/java/org/elasticsearch/test/rest/spec/RestSpec.java
new file mode 100644
index 0000000..6da8591
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/spec/RestSpec.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.spec;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.support.FileUtils;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Holds the elasticsearch REST spec
+ */
+public class RestSpec {
+ Map<String, RestApi> restApiMap = Maps.newHashMap();
+
+ private RestSpec() {
+ }
+
+ void addApi(RestApi restApi) {
+ restApiMap.put(restApi.getName(), restApi);
+ }
+
+ public RestApi getApi(String api) {
+ return restApiMap.get(api);
+ }
+
+ /**
+ * Parses the complete set of REST specs available under the provided directories
+ */
+ public static RestSpec parseFrom(String optionalPathPrefix, String... paths) throws IOException {
+ RestSpec restSpec = new RestSpec();
+ for (String path : paths) {
+ for (File jsonFile : FileUtils.findJsonSpec(optionalPathPrefix, path)) {
+ try {
+ XContentParser parser = JsonXContent.jsonXContent.createParser(new FileInputStream(jsonFile));
+ RestApi restApi = new RestApiParser().parse(parser);
+ restSpec.addApi(restApi);
+ } catch (IOException ex) {
+ throw new IOException("Can't parse rest spec file: [" + jsonFile + "]", ex);
+ }
+ }
+ }
+ return restSpec;
+ }
+}
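A hypothetical invocation, assuming the api definitions live under /rest-api-spec/api on the classpath:

    RestSpec restSpec = RestSpec.parseFrom("/rest-api-spec", "api");
    RestApi getApi = restSpec.getApi("get");  // null if no json file declared a "get" api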
diff --git a/src/test/java/org/elasticsearch/test/rest/support/Features.java b/src/test/java/org/elasticsearch/test/rest/support/Features.java
new file mode 100644
index 0000000..05d6cfb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/support/Features.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.support;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+
+/**
+ * Allows registration of additional features supported by the test runner.
+ * This way any runner can add extra features and use proper skip sections to avoid
+ * breaking other runners until they have implemented the new feature as well.
+ *
+ * Once all runners have implemented the feature, it can be removed from the list
+ * and the related skip sections can be removed from the tests as well.
+ */
+public final class Features {
+
+ private static final List<String> SUPPORTED = Lists.newArrayList();
+
+ private Features() {
+
+ }
+
+ /**
+ * Tells whether all the features provided as argument are supported
+ */
+ public static boolean areAllSupported(List<String> features) {
+ for (String feature : features) {
+ if (!SUPPORTED.contains(feature)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Returns all the supported features
+ */
+ public static List<String> getSupported() {
+ return SUPPORTED;
+ }
+}
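A minimal sketch; "benchmark" is a hypothetical feature name, and SUPPORTED is currently empty in this runner:

    List<String> required = Lists.newArrayList("benchmark");
    Features.areAllSupported(required);  // false: no extra features are registered
    // a skip section naming this feature would therefore skip its test section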
diff --git a/src/test/java/org/elasticsearch/test/rest/support/FileUtils.java b/src/test/java/org/elasticsearch/test/rest/support/FileUtils.java
new file mode 100644
index 0000000..6c81703
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/support/FileUtils.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.support;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.elasticsearch.common.Strings;
+
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileNotFoundException;
+import java.net.URL;
+import java.util.Map;
+import java.util.Set;
+
+public final class FileUtils {
+
+ private static final String YAML_SUFFIX = ".yaml";
+ private static final String JSON_SUFFIX = ".json";
+
+ private FileUtils() {
+
+ }
+
+ /**
+ * Returns the json files found within the directory provided as argument.
+ * Files are looked up in the classpath first, then outside of it if not found.
+ */
+ public static Set<File> findJsonSpec(String optionalPathPrefix, String path) throws FileNotFoundException {
+ File dir = resolveFile(optionalPathPrefix, path, null);
+
+ if (!dir.isDirectory()) {
+ throw new FileNotFoundException("file [" + path + "] is not a directory");
+ }
+
+ File[] jsonFiles = dir.listFiles(new FileFilter() {
+ @Override
+ public boolean accept(File pathname) {
+ return pathname.getName().endsWith(JSON_SUFFIX);
+ }
+ });
+
+ if (jsonFiles == null || jsonFiles.length == 0) {
+ throw new FileNotFoundException("no json files found within [" + path + "]");
+ }
+
+ return Sets.newHashSet(jsonFiles);
+ }
+
+ /**
+ * Returns the yaml files found within the paths provided.
+ * Each input path can either be a single file (the .yaml suffix is optional) or a directory.
+ * Each path is looked up in the classpath first, then outside of it if not found there.
+ */
+ public static Map<String, Set<File>> findYamlSuites(final String optionalPathPrefix, final String... paths) throws FileNotFoundException {
+ Map<String, Set<File>> yamlSuites = Maps.newHashMap();
+ for (String path : paths) {
+ collectFiles(resolveFile(optionalPathPrefix, path, YAML_SUFFIX), YAML_SUFFIX, yamlSuites);
+ }
+ return yamlSuites;
+ }
+
+ private static File resolveFile(String optionalPathPrefix, String path, String optionalFileSuffix) throws FileNotFoundException {
+ //try within classpath with and without file suffix (as it could be a single test suite)
+ URL resource = findResource(path, optionalFileSuffix);
+ if (resource == null) {
+ //try within classpath with optional prefix: /rest-api-spec/test (or /rest-api-spec/api) is optional
+ String newPath = optionalPathPrefix + "/" + path;
+ resource = findResource(newPath, optionalFileSuffix);
+ if (resource == null) {
+ //if it wasn't found on the classpath we look outside of it
+ File file = findFile(path, optionalFileSuffix);
+ if (!file.exists()) {
+ throw new FileNotFoundException("file [" + path + "] doesn't exist");
+ }
+ return file;
+ }
+ }
+ return new File(resource.getFile());
+ }
+
+ private static URL findResource(String path, String optionalFileSuffix) {
+ URL resource = FileUtils.class.getResource(path);
+ if (resource == null) {
+ //if not found we append the file suffix to the path (as it is optional)
+ if (Strings.hasLength(optionalFileSuffix) && !path.endsWith(optionalFileSuffix)) {
+ resource = FileUtils.class.getResource(path + optionalFileSuffix);
+ }
+ }
+ return resource;
+ }
+
+ private static File findFile(String path, String optionalFileSuffix) {
+ File file = new File(path);
+ if (!file.exists()) {
+ file = new File(path + optionalFileSuffix);
+ }
+ return file;
+ }
+
+ private static void collectFiles(final File file, final String fileSuffix, final Map<String, Set<File>> files) {
+ if (file.isFile()) {
+ String groupName = file.getParentFile().getName();
+ Set<File> filesSet = files.get(groupName);
+ if (filesSet == null) {
+ filesSet = Sets.newHashSet();
+ files.put(groupName, filesSet);
+ }
+ filesSet.add(file);
+ } else if (file.isDirectory()) {
+ walkDir(file, fileSuffix, files);
+ }
+ }
+
+ private static void walkDir(final File dir, final String fileSuffix, final Map<String, Set<File>> files) {
+ File[] children = dir.listFiles(new FileFilter() {
+ @Override
+ public boolean accept(File pathname) {
+ return pathname.isDirectory() || pathname.getName().endsWith(fileSuffix);
+ }
+ });
+
+ for (File file : children) {
+ collectFiles(file, fileSuffix, files);
+ }
+ }
+}
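A hypothetical lookup, assuming a suite file at /rest-api-spec/test/get/10_basic.yaml on the classpath:

    Map<String, Set<File>> suites = FileUtils.findYamlSuites("/rest-api-spec/test", "get/10_basic");
    // the ".yaml" suffix is optional for single files; the result maps the
    // parent directory name ("get") to the set of matching suite files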
diff --git a/src/test/java/org/elasticsearch/test/rest/support/VersionUtils.java b/src/test/java/org/elasticsearch/test/rest/support/VersionUtils.java
new file mode 100644
index 0000000..9c19210
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/support/VersionUtils.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.support;
+
+public final class VersionUtils {
+
+ private VersionUtils() {
+
+ }
+
+ /**
+ * Parses an elasticsearch version string into an int array with an element per part
+ * e.g. 0.90.7 => [0,90,7]
+ */
+ public static int[] parseVersionNumber(String version) {
+ String[] split = version.split("\\.");
+ //we only take the first 3 parts if there are more, but fewer is ok too (e.g. 999)
+ int length = Math.min(3, split.length);
+ int[] versionNumber = new int[length];
+ for (int i = 0; i < length; i++) {
+ try {
+ versionNumber[i] = Integer.valueOf(split[i]);
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException("version is not a number", e);
+ }
+
+ }
+ return versionNumber;
+ }
+
+ /**
+ * Compares the skip version read from a test fragment with the elasticsearch version
+ * the tests are running against and determines whether the test fragment needs to be skipped
+ */
+ public static boolean skipCurrentVersion(String skipVersion, String currentVersion) {
+ int[] currentVersionNumber = parseVersionNumber(currentVersion);
+
+ String[] skipVersions = skipVersion.split("-");
+ if (skipVersions.length != 2) {
+ throw new IllegalArgumentException("skip version must be a range in the form \"lowerbound - upperbound\"");
+ }
+
+ String skipVersionLowerBound = skipVersions[0].trim();
+ String skipVersionUpperBound = skipVersions[1].trim();
+
+ int[] skipVersionLowerBoundNumber = parseVersionNumber(skipVersionLowerBound);
+ int[] skipVersionUpperBoundNumber = parseVersionNumber(skipVersionUpperBound);
+
+ int length = Math.min(skipVersionLowerBoundNumber.length, currentVersionNumber.length);
+ for (int i = 0; i < length; i++) {
+ if (currentVersionNumber[i] < skipVersionLowerBoundNumber[i]) {
+ return false;
+ }
+ if (currentVersionNumber[i] > skipVersionLowerBoundNumber[i]) {
+ break;
+ }
+ }
+
+ length = Math.min(skipVersionUpperBoundNumber.length, currentVersionNumber.length);
+ for (int i = 0; i < length; i++) {
+ if (currentVersionNumber[i] > skipVersionUpperBoundNumber[i]) {
+ return false;
+ }
+ if (currentVersionNumber[i] < skipVersionUpperBoundNumber[i]) {
+ break;
+ }
+ }
+
+ return true;
+ }
+}
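A minimal sketch of the range check; the versions are illustrative:

    VersionUtils.parseVersionNumber("0.90.7");                     // [0, 90, 7]
    VersionUtils.skipCurrentVersion("0.90.2 - 0.90.9", "0.90.7");  // true: inside the range
    VersionUtils.skipCurrentVersion("0.90.2 - 0.90.9", "1.0.0");   // false: above the upper bound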
diff --git a/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTests.java
new file mode 100644
index 0000000..a2ee377
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTests.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Ignore;
+
+import static org.hamcrest.Matchers.nullValue;
+
+@Ignore
+public abstract class AbstractParserTests extends ElasticsearchTestCase {
+
+ protected XContentParser parser;
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ //make sure that the whole yaml was consumed
+ assertThat(parser.currentToken(), nullValue());
+ parser.close();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java b/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java
new file mode 100644
index 0000000..9d9ba78
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.*;
+import org.elasticsearch.test.rest.section.*;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+public class AssertionParsersTests extends AbstractParserTests {
+
+ @Test
+ public void testParseIsTrue() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "get.fields._timestamp"
+ );
+
+ IsTrueParser isTrueParser = new IsTrueParser();
+ IsTrueAssertion trueAssertion = isTrueParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(trueAssertion, notNullValue());
+ assertThat(trueAssertion.getField(), equalTo("get.fields._timestamp"));
+ }
+
+ @Test
+ public void testParseIsFalse() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "docs.1._source"
+ );
+
+ IsFalseParser isFalseParser = new IsFalseParser();
+ IsFalseAssertion falseAssertion = isFalseParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(falseAssertion, notNullValue());
+ assertThat(falseAssertion.getField(), equalTo("docs.1._source"));
+ }
+
+ @Test
+ public void testParseGreaterThan() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ field: 3}"
+ );
+
+ GreaterThanParser greaterThanParser = new GreaterThanParser();
+ GreaterThanAssertion greaterThanAssertion = greaterThanParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ assertThat(greaterThanAssertion, notNullValue());
+ assertThat(greaterThanAssertion.getField(), equalTo("field"));
+ assertThat(greaterThanAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) greaterThanAssertion.getExpectedValue(), equalTo(3));
+ }
+
+ @Test
+ public void testParseLessThan() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ field: 3}"
+ );
+
+ LessThanParser lessThanParser = new LessThanParser();
+ LessThanAssertion lessThanAssertion = lessThanParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ assertThat(lessThanAssertion, notNullValue());
+ assertThat(lessThanAssertion.getField(), equalTo("field"));
+ assertThat(lessThanAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) lessThanAssertion.getExpectedValue(), equalTo(3));
+ }
+
+ @Test
+ public void testParseLength() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ _id: 22}"
+ );
+
+ LengthParser lengthParser = new LengthParser();
+ LengthAssertion lengthAssertion = lengthParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ assertThat(lengthAssertion, notNullValue());
+ assertThat(lengthAssertion.getField(), equalTo("_id"));
+ assertThat(lengthAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) lengthAssertion.getExpectedValue(), equalTo(22));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testParseMatchSimpleIntegerValue() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ field: 10 }"
+ );
+
+ MatchParser matchParser = new MatchParser();
+ MatchAssertion matchAssertion = matchParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(matchAssertion, notNullValue());
+ assertThat(matchAssertion.getField(), equalTo("field"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) matchAssertion.getExpectedValue(), equalTo(10));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testParseMatchSimpleStringValue() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ foo: bar }"
+ );
+
+ MatchParser matchParser = new MatchParser();
+ MatchAssertion matchAssertion = matchParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(matchAssertion, notNullValue());
+ assertThat(matchAssertion.getField(), equalTo("foo"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(String.class));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("bar"));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testParseMatchArray() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{'matches': ['test_percolator_1', 'test_percolator_2']}"
+ );
+
+ MatchParser matchParser = new MatchParser();
+ MatchAssertion matchAssertion = matchParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(matchAssertion, notNullValue());
+ assertThat(matchAssertion.getField(), equalTo("matches"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(List.class));
+ List strings = (List) matchAssertion.getExpectedValue();
+ assertThat(strings.size(), equalTo(2));
+ assertThat(strings.get(0).toString(), equalTo("test_percolator_1"));
+ assertThat(strings.get(1).toString(), equalTo("test_percolator_2"));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testParseMatchSourceValues() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ _source: { responses.0.hits.total: 3, foo: bar }}"
+ );
+
+ MatchParser matchParser = new MatchParser();
+ MatchAssertion matchAssertion = matchParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(matchAssertion, notNullValue());
+ assertThat(matchAssertion.getField(), equalTo("_source"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(Map.class));
+ Map<String, Object> expectedValue = (Map<String, Object>) matchAssertion.getExpectedValue();
+ assertThat(expectedValue.size(), equalTo(2));
+ Object o = expectedValue.get("responses.0.hits.total");
+ assertThat(o, instanceOf(Integer.class));
+ assertThat((Integer)o, equalTo(3));
+ o = expectedValue.get("foo");
+ assertThat(o, instanceOf(String.class));
+ assertThat(o.toString(), equalTo("bar"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java
new file mode 100644
index 0000000..e580f0f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java
@@ -0,0 +1,393 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.DoSectionParser;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.section.ApiCallSection;
+import org.elasticsearch.test.rest.section.DoSection;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+public class DoSectionParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseDoSectionNoBody() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "get:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ " id: 1"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("get"));
+ assertThat(apiCallSection.getParams().size(), equalTo(3));
+ assertThat(apiCallSection.getParams().get("index"), equalTo("test_index"));
+ assertThat(apiCallSection.getParams().get("type"), equalTo("test_type"));
+ assertThat(apiCallSection.getParams().get("id"), equalTo("1"));
+ assertThat(apiCallSection.hasBody(), equalTo(false));
+ }
+
+ @Test
+ public void testParseDoSectionNoParamsNoBody() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "cluster.node_info: {}"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("cluster.node_info"));
+ assertThat(apiCallSection.getParams().size(), equalTo(0));
+ assertThat(apiCallSection.hasBody(), equalTo(false));
+ }
+
+ @Test
+ public void testParseDoSectionWithJsonBody() throws Exception {
+ String body = "{ \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }";
+ parser = YamlXContent.yamlXContent.createParser(
+ "index:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: " + body
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("index"));
+ assertThat(apiCallSection.getParams().size(), equalTo(3));
+ assertThat(apiCallSection.getParams().get("index"), equalTo("test_1"));
+ assertThat(apiCallSection.getParams().get("type"), equalTo("test"));
+ assertThat(apiCallSection.getParams().get("id"), equalTo("1"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+
+ assertJsonEquals(apiCallSection.getBodies().get(0), body);
+ }
+
+ @Test
+ public void testParseDoSectionWithJsonMultipleBodiesAsLongString() throws Exception {
+ String[] bodies = new String[]{
+ "{ \"index\": { \"_index\":\"test_index\", \"_type\":\"test_type\", \"_id\":\"test_id\" } }\n",
+ "{ \"f1\":\"v1\", \"f2\":42 }\n",
+ "{ \"index\": { \"_index\":\"test_index2\", \"_type\":\"test_type2\", \"_id\":\"test_id2\" } }\n",
+ "{ \"f1\":\"v2\", \"f2\":47 }\n"
+ };
+ parser = YamlXContent.yamlXContent.createParser(
+ "bulk:\n" +
+ " refresh: true\n" +
+ " body: |\n" +
+ " " + bodies[0] +
+ " " + bodies[1] +
+ " " + bodies[2] +
+ " " + bodies[3]
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("bulk"));
+ assertThat(apiCallSection.getParams().size(), equalTo(1));
+ assertThat(apiCallSection.getParams().get("refresh"), equalTo("true"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(4));
+ }
+
+ @Test
+ public void testParseDoSectionWithJsonMultipleBodiesRepeatedProperty() throws Exception {
+ String[] bodies = new String[] {
+ "{ \"index\": { \"_index\":\"test_index\", \"_type\":\"test_type\", \"_id\":\"test_id\" } }",
+ "{ \"f1\":\"v1\", \"f2\":42 }",
+ };
+ parser = YamlXContent.yamlXContent.createParser(
+ "bulk:\n" +
+ " refresh: true\n" +
+ " body: \n" +
+ " " + bodies[0] + "\n" +
+ " body: \n" +
+ " " + bodies[1]
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("bulk"));
+ assertThat(apiCallSection.getParams().size(), equalTo(1));
+ assertThat(apiCallSection.getParams().get("refresh"), equalTo("true"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(bodies.length));
+ for (int i = 0; i < bodies.length; i++) {
+ assertJsonEquals(apiCallSection.getBodies().get(i), bodies[i]);
+ }
+ }
+
+ @Test
+ public void testParseDoSectionWithYamlBody() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "search:\n" +
+ " body:\n" +
+ " _source: [ include.field1, include.field2 ]\n" +
+ " query: { match_all: {} }"
+ );
+ String body = "{ \"_source\": [ \"include.field1\", \"include.field2\" ], \"query\": { \"match_all\": {} }}";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("search"));
+ assertThat(apiCallSection.getParams().size(), equalTo(0));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(1));
+ assertJsonEquals(apiCallSection.getBodies().get(0), body);
+ }
+
+ @Test
+ public void testParseDoSectionWithYamlMultipleBodies() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "bulk:\n" +
+ " refresh: true\n" +
+ " body:\n" +
+ " - index:\n" +
+ " _index: test_index\n" +
+ " _type: test_type\n" +
+ " _id: test_id\n" +
+ " - f1: v1\n" +
+ " f2: 42\n" +
+ " - index:\n" +
+ " _index: test_index2\n" +
+ " _type: test_type2\n" +
+ " _id: test_id2\n" +
+ " - f1: v2\n" +
+ " f2: 47"
+ );
+ String[] bodies = new String[4];
+ bodies[0] = "{\"index\": {\"_index\": \"test_index\", \"_type\": \"test_type\", \"_id\": \"test_id\"}}";
+ bodies[1] = "{ \"f1\":\"v1\", \"f2\": 42 }";
+ bodies[2] = "{\"index\": {\"_index\": \"test_index2\", \"_type\": \"test_type2\", \"_id\": \"test_id2\"}}";
+ bodies[3] = "{ \"f1\":\"v2\", \"f2\": 47 }";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("bulk"));
+ assertThat(apiCallSection.getParams().size(), equalTo(1));
+ assertThat(apiCallSection.getParams().get("refresh"), equalTo("true"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(bodies.length));
+
+ for (int i = 0; i < bodies.length; i++) {
+ assertJsonEquals(apiCallSection.getBodies().get(i), bodies[i]);
+ }
+ }
+
+ @Test
+ public void testParseDoSectionWithYamlMultipleBodiesRepeatedProperty() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "bulk:\n" +
+ " refresh: true\n" +
+ " body:\n" +
+ " index:\n" +
+ " _index: test_index\n" +
+ " _type: test_type\n" +
+ " _id: test_id\n" +
+ " body:\n" +
+ " f1: v1\n" +
+ " f2: 42\n"
+ );
+ String[] bodies = new String[2];
+ bodies[0] = "{\"index\": {\"_index\": \"test_index\", \"_type\": \"test_type\", \"_id\": \"test_id\"}}";
+ bodies[1] = "{ \"f1\":\"v1\", \"f2\": 42 }";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("bulk"));
+ assertThat(apiCallSection.getParams().size(), equalTo(1));
+ assertThat(apiCallSection.getParams().get("refresh"), equalTo("true"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(bodies.length));
+
+ for (int i = 0; i < bodies.length; i++) {
+ assertJsonEquals(apiCallSection.getBodies().get(i), bodies[i]);
+ }
+ }
+
+ @Test
+ public void testParseDoSectionWithYamlBodyMultiGet() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "mget:\n" +
+ " body:\n" +
+ " docs:\n" +
+ " - { _index: test_2, _type: test, _id: 1}\n" +
+ " - { _index: test_1, _type: none, _id: 1}"
+ );
+ String body = "{ \"docs\": [ " +
+ "{\"_index\": \"test_2\", \"_type\":\"test\", \"_id\":1}, " +
+ "{\"_index\": \"test_1\", \"_type\":\"none\", \"_id\":1} " +
+ "]}";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("mget"));
+ assertThat(apiCallSection.getParams().size(), equalTo(0));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(1));
+ assertJsonEquals(apiCallSection.getBodies().get(0), body);
+ }
+
+ @Test
+ public void testParseDoSectionWithBodyStringified() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "index:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: \"{ _source: true, query: { match_all: {} } }\""
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("index"));
+ assertThat(apiCallSection.getParams().size(), equalTo(3));
+ assertThat(apiCallSection.getParams().get("index"), equalTo("test_1"));
+ assertThat(apiCallSection.getParams().get("type"), equalTo("test"));
+ assertThat(apiCallSection.getParams().get("id"), equalTo("1"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(1));
+ //stringified body is taken as is
+ assertJsonEquals(apiCallSection.getBodies().get(0), "{ _source: true, query: { match_all: {} } }");
+ }
+
+ @Test
+ public void testParseDoSectionWithBodiesStringifiedAndNot() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "index:\n" +
+ " body:\n" +
+ " - \"{ _source: true, query: { match_all: {} } }\"\n" +
+ " - { size: 100, query: { match_all: {} } }"
+ );
+
+ String body = "{ \"size\": 100, \"query\": { \"match_all\": {} } }";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection.getApi(), equalTo("index"));
+ assertThat(apiCallSection.getParams().size(), equalTo(0));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(2));
+ //the stringified body is taken as-is
+ assertJsonEquals(apiCallSection.getBodies().get(0), "{ _source: true, query: { match_all: {} } }");
+ assertJsonEquals(apiCallSection.getBodies().get(1), body);
+ }
+
+ @Test
+ public void testParseDoSectionWithCatch() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "catch: missing\n" +
+ "indices.get_warmer:\n" +
+ " index: test_index\n" +
+ " name: test_warmer"
+ );
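+ //"catch: missing" declares that this call is expected to fail because the requested resource does not exist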
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(doSection.getCatch(), equalTo("missing"));
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_warmer"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ }
+
+ @Test (expected = RestTestParseException.class)
+ public void testParseDoSectionWithoutClientCallSection() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "catch: missing\n"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ }
+
+ @Test
+ public void testParseDoSectionMultivaluedField() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "indices.get_field_mapping:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ " field: [ text , text1 ]"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_field_mapping"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index"));
+ assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type"));
+ assertThat(doSection.getApiCallSection().getParams().get("field"), equalTo("text,text1"));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0));
+ }
+
+ private static void assertJsonEquals(Map<String, Object> actual, String expected) throws IOException {
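+ //parse the expected JSON into an ordered map so the comparison ignores formatting differences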
+ Map<String,Object> expectedMap = JsonXContent.jsonXContent.createParser(expected).mapOrderedAndClose();
+ MatcherAssert.assertThat(actual, equalTo(expectedMap));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java b/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java
new file mode 100644
index 0000000..66c7f04
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.support.FileUtils;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.Map;
+import java.util.Set;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.Matchers.greaterThan;
+
+public class FileUtilsTests extends ElasticsearchTestCase {
+
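+ //findYamlSuites groups the suite files it finds by the name of their parent directory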
+ @Test
+ public void testLoadSingleYamlSuite() throws Exception {
+ Map<String,Set<File>> yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "/rest-api-spec/test/get/10_basic");
+ assertSingleFile(yamlSuites, "get", "10_basic.yaml");
+
+ //the path prefix is optional
+ yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "get/10_basic.yaml");
+ assertSingleFile(yamlSuites, "get", "10_basic.yaml");
+
+ //extension .yaml is optional
+ yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "get/10_basic");
+ assertSingleFile(yamlSuites, "get", "10_basic.yaml");
+ }
+
+ @Test
+ public void testLoadMultipleYamlSuites() throws Exception {
+ //single directory
+ Map<String,Set<File>> yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "get");
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(1));
+ assertThat(yamlSuites.containsKey("get"), equalTo(true));
+ assertThat(yamlSuites.get("get").size(), greaterThan(1));
+
+ //multiple directories
+ yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "get", "index");
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(2));
+ assertThat(yamlSuites.containsKey("get"), equalTo(true));
+ assertThat(yamlSuites.get("get").size(), greaterThan(1));
+ assertThat(yamlSuites.containsKey("index"), equalTo(true));
+ assertThat(yamlSuites.get("index").size(), greaterThan(1));
+
+ //multiple paths, which can be either directories or yaml test suites (with optional file extension)
+ yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "indices.optimize/10_basic", "index");
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(2));
+ assertThat(yamlSuites.containsKey("indices.optimize"), equalTo(true));
+ assertThat(yamlSuites.get("indices.optimize").size(), equalTo(1));
+ assertSingleFile(yamlSuites.get("indices.optimize"), "indices.optimize", "10_basic.yaml");
+ assertThat(yamlSuites.containsKey("index"), equalTo(true));
+ assertThat(yamlSuites.get("index").size(), greaterThan(1));
+
+ //files can be loaded from classpath and from file system too
+ File dir = newTempDir();
+ File file = new File(dir, "test_loading.yaml");
+ assertThat(file.createNewFile(), equalTo(true));
+
+ //load from directory outside of the classpath
+ yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "get/10_basic", dir.getAbsolutePath());
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(2));
+ assertThat(yamlSuites.containsKey("get"), equalTo(true));
+ assertThat(yamlSuites.get("get").size(), equalTo(1));
+ assertSingleFile(yamlSuites.get("get"), "get", "10_basic.yaml");
+ assertThat(yamlSuites.containsKey(dir.getName()), equalTo(true));
+ assertSingleFile(yamlSuites.get(dir.getName()), dir.getName(), file.getName());
+
+ //load from external file (optional extension)
+ yamlSuites = FileUtils.findYamlSuites("/rest-api-spec/test", "get/10_basic", dir.getAbsolutePath() + File.separator + "test_loading");
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(2));
+ assertThat(yamlSuites.containsKey("get"), equalTo(true));
+ assertThat(yamlSuites.get("get").size(), equalTo(1));
+ assertSingleFile(yamlSuites.get("get"), "get", "10_basic.yaml");
+ assertThat(yamlSuites.containsKey(dir.getName()), equalTo(true));
+ assertSingleFile(yamlSuites.get(dir.getName()), dir.getName(), file.getName());
+ }
+
+ private static void assertSingleFile(Map<String, Set<File>> yamlSuites, String dirName, String fileName) {
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(1));
+ assertThat(yamlSuites.containsKey(dirName), equalTo(true));
+ assertSingleFile(yamlSuites.get(dirName), dirName, fileName);
+ }
+
+ private static void assertSingleFile(Set<File> files, String dirName, String fileName) {
+ assertThat(files.size(), equalTo(1));
+ File file = files.iterator().next();
+ assertThat(file.getName(), equalTo(fileName));
+ assertThat(file.getParentFile().getName(), equalTo(dirName));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java b/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java
new file mode 100644
index 0000000..bf56f44
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.json.JsonPath;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.*;
+
+public class JsonPathTests extends ElasticsearchTestCase {
+
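+ //the tests below cover the supported path syntax: dots separate object keys,
+ //"\\." escapes a literal dot in a key, stray dots are tolerated, and numeric
+ //fragments index into arrays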
+ @Test
+ public void testEvaluateObjectPathEscape() throws Exception {
+ String json = "{ \"field1\": { \"field2.field3\" : \"value2\" } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2\\.field3");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateObjectPathWithDoubleDot() throws Exception {
+ String json = "{ \"field1\": { \"field2\" : \"value2\" } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1..field2");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateObjectPathEndsWithDot() throws Exception {
+ String json = "{ \"field1\": { \"field2\" : \"value2\" } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2.");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateString() throws Exception {
+ String json = "{ \"field1\": { \"field2\" : \"value2\" } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateInteger() throws Exception {
+ String json = "{ \"field1\": { \"field2\" : 333 } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2");
+ assertThat(object, instanceOf(Integer.class));
+ assertThat((Integer)object, equalTo(333));
+ }
+
+ @Test
+ public void testEvaluateDouble() throws Exception {
+ String json = "{ \"field1\": { \"field2\" : 3.55 } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2");
+ assertThat(object, instanceOf(Double.class));
+ assertThat((Double)object, equalTo(3.55));
+ }
+
+ @Test
+ public void testEvaluateArray() throws Exception {
+ String json = "{ \"field1\": { \"array1\" : [ \"value1\", \"value2\" ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.array1");
+ assertThat(object, instanceOf(List.class));
+ List list = (List) object;
+ assertThat(list.size(), equalTo(2));
+ assertThat(list.get(0), instanceOf(String.class));
+ assertThat((String)list.get(0), equalTo("value1"));
+ assertThat(list.get(1), instanceOf(String.class));
+ assertThat((String)list.get(1), equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateArrayElement() throws Exception {
+ String json = "{ \"field1\": { \"array1\" : [ \"value1\", \"value2\" ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.array1.1");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateArrayElementObject() throws Exception {
+ String json = "{ \"field1\": { \"array1\" : [ {\"element\": \"value1\"}, {\"element\":\"value2\"} ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.array1.1.element");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateArrayElementObjectWrongPath() throws Exception {
+ String json = "{ \"field1\": { \"array1\" : [ {\"element\": \"value1\"}, {\"element\":\"value2\"} ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.array2.1.element");
+ assertThat(object, nullValue());
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testEvaluateObjectKeys() throws Exception {
+ String json = "{ \"metadata\": { \"templates\" : {\"template_1\": { \"field\" : \"value\"}, \"template_2\": { \"field\" : \"value\"} } } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("metadata.templates");
+ assertThat(object, instanceOf(Map.class));
+ Map<String, Object> map = (Map<String, Object>)object;
+ assertThat(map.size(), equalTo(2));
+ Set<String> strings = map.keySet();
+ assertThat(strings, contains("template_1", "template_2"));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testEvaluateEmptyPath() throws Exception {
+ String json = "{ \"field1\": { \"array1\" : [ {\"element\": \"value1\"}, {\"element\":\"value2\"} ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("");
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Map.class));
+ assertThat(((Map<String, Object>)object).containsKey("field1"), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java b/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java
new file mode 100644
index 0000000..c86660f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.spec.RestApiParser;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.containsString;
+
+/**
+ *
+ */
+public class RestApiParserFailingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void brokenSpecShouldThrowUsefulExceptionWhenParsingFailsOnParams() throws Exception {
+ parseAndExpectFailure(BROKEN_SPEC_PARAMS, "Expected params field in rest api definition to contain an object");
+ }
+
+ @Test
+ public void brokenSpecShouldThrowUsefulExceptionWhenParsingFailsOnParts() throws Exception {
+ parseAndExpectFailure(BROKEN_SPEC_PARTS, "Expected parts field in rest api definition to contain an object");
+ }
+
+ private void parseAndExpectFailure(String brokenJson, String expectedErrorMessage) throws Exception {
+ XContentParser parser = JsonXContent.jsonXContent.createParser(brokenJson);
+ try {
+ new RestApiParser().parse(parser);
+ fail("Expected to fail parsing but did not happen");
+ } catch (IOException e) {
+ assertThat(e.getMessage(), containsString(expectedErrorMessage));
+ }
+
+ }
+
+ // the params section is broken: the wrapping parameter name object is missing
+ private static final String BROKEN_SPEC_PARAMS = "{\n" +
+ " \"ping\": {" +
+ " \"documentation\": \"http://www.elasticsearch.org/guide/\"," +
+ " \"methods\": [\"HEAD\"]," +
+ " \"url\": {" +
+ " \"path\": \"/\"," +
+ " \"paths\": [\"/\"]," +
+ " \"parts\": {" +
+ " }," +
+ " \"params\": {" +
+ " \"type\" : \"boolean\",\n" +
+ " \"description\" : \"Whether specified concrete indices should be ignored when unavailable (missing or closed)\"\n" +
+ " }" +
+ " }," +
+ " \"body\": null" +
+ " }" +
+ "}";
+
+ // the parts section is broken: the wrapping part name object is missing
+ private static final String BROKEN_SPEC_PARTS = "{\n" +
+ " \"ping\": {" +
+ " \"documentation\": \"http://www.elasticsearch.org/guide/\"," +
+ " \"methods\": [\"HEAD\"]," +
+ " \"url\": {" +
+ " \"path\": \"/\"," +
+ " \"paths\": [\"/\"]," +
+ " \"parts\": {" +
+ " \"type\" : \"boolean\",\n" +
+ " }," +
+ " \"params\": {\n" +
+ " \"ignore_unavailable\": {\n" +
+ " \"type\" : \"boolean\",\n" +
+ " \"description\" : \"Whether specified concrete indices should be ignored when unavailable (missing or closed)\"\n" +
+ " } \n" +
+ " }," +
+ " \"body\": null" +
+ " }" +
+ "}";
+
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java
new file mode 100644
index 0000000..5558041
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.spec.RestApi;
+import org.elasticsearch.test.rest.spec.RestApiParser;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class RestApiParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseRestSpecIndexApi() throws Exception {
+ parser = JsonXContent.jsonXContent.createParser(REST_SPEC_INDEX_API);
+ RestApi restApi = new RestApiParser().parse(parser);
+
+ assertThat(restApi, notNullValue());
+ assertThat(restApi.getName(), equalTo("index"));
+ assertThat(restApi.getMethods().size(), equalTo(2));
+ assertThat(restApi.getMethods().get(0), equalTo("POST"));
+ assertThat(restApi.getMethods().get(1), equalTo("PUT"));
+ assertThat(restApi.getPaths().size(), equalTo(2));
+ assertThat(restApi.getPaths().get(0), equalTo("/{index}/{type}"));
+ assertThat(restApi.getPaths().get(1), equalTo("/{index}/{type}/{id}"));
+ assertThat(restApi.getPathParts().size(), equalTo(3));
+ assertThat(restApi.getPathParts().get(0), equalTo("id"));
+ assertThat(restApi.getPathParts().get(1), equalTo("index"));
+ assertThat(restApi.getPathParts().get(2), equalTo("type"));
+ assertThat(restApi.getParams().size(), equalTo(4));
+ assertThat(restApi.getParams(), contains("consistency", "op_type", "parent", "refresh"));
+ assertThat(restApi.isBodySupported(), equalTo(true));
+ assertThat(restApi.isBodyRequired(), equalTo(true));
+ }
+
+ @Test
+ public void testParseRestSpecGetTemplateApi() throws Exception {
+ parser = JsonXContent.jsonXContent.createParser(REST_SPEC_GET_TEMPLATE_API);
+ RestApi restApi = new RestApiParser().parse(parser);
+ assertThat(restApi, notNullValue());
+ assertThat(restApi.getName(), equalTo("indices.get_template"));
+ assertThat(restApi.getMethods().size(), equalTo(1));
+ assertThat(restApi.getMethods().get(0), equalTo("GET"));
+ assertThat(restApi.getPaths().size(), equalTo(2));
+ assertThat(restApi.getPaths().get(0), equalTo("/_template"));
+ assertThat(restApi.getPaths().get(1), equalTo("/_template/{name}"));
+ assertThat(restApi.getPathParts().size(), equalTo(1));
+ assertThat(restApi.getPathParts().get(0), equalTo("name"));
+ assertThat(restApi.getParams().size(), equalTo(0));
+ assertThat(restApi.isBodySupported(), equalTo(false));
+ assertThat(restApi.isBodyRequired(), equalTo(false));
+ }
+
+ @Test
+ public void testParseRestSpecCountApi() throws Exception {
+ parser = JsonXContent.jsonXContent.createParser(REST_SPEC_COUNT_API);
+ RestApi restApi = new RestApiParser().parse(parser);
+ assertThat(restApi, notNullValue());
+ assertThat(restApi.getName(), equalTo("count"));
+ assertThat(restApi.getMethods().size(), equalTo(2));
+ assertThat(restApi.getMethods().get(0), equalTo("POST"));
+ assertThat(restApi.getMethods().get(1), equalTo("GET"));
+ assertThat(restApi.getPaths().size(), equalTo(3));
+ assertThat(restApi.getPaths().get(0), equalTo("/_count"));
+ assertThat(restApi.getPaths().get(1), equalTo("/{index}/_count"));
+ assertThat(restApi.getPaths().get(2), equalTo("/{index}/{type}/_count"));
+ assertThat(restApi.getPathParts().size(), equalTo(2));
+ assertThat(restApi.getPathParts().get(0), equalTo("index"));
+ assertThat(restApi.getPathParts().get(1), equalTo("type"));
+ assertThat(restApi.getParams().size(), equalTo(1));
+ assertThat(restApi.getParams().get(0), equalTo("ignore_unavailable"));
+ assertThat(restApi.isBodySupported(), equalTo(true));
+ assertThat(restApi.isBodyRequired(), equalTo(false));
+ }
+
+ private static final String REST_SPEC_COUNT_API = "{\n" +
+ " \"count\": {\n" +
+ " \"documentation\": \"http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-count.html\",\n" +
+ " \"methods\": [\"POST\", \"GET\"],\n" +
+ " \"url\": {\n" +
+ " \"path\": \"/_count\",\n" +
+ " \"paths\": [\"/_count\", \"/{index}/_count\", \"/{index}/{type}/_count\"],\n" +
+ " \"parts\": {\n" +
+ " \"index\": {\n" +
+ " \"type\" : \"list\",\n" +
+ " \"description\" : \"A comma-separated list of indices to restrict the results\"\n" +
+ " },\n" +
+ " \"type\": {\n" +
+ " \"type\" : \"list\",\n" +
+ " \"description\" : \"A comma-separated list of types to restrict the results\"\n" +
+ " }\n" +
+ " },\n" +
+ " \"params\": {\n" +
+ " \"ignore_unavailable\": {\n" +
+ " \"type\" : \"boolean\",\n" +
+ " \"description\" : \"Whether specified concrete indices should be ignored when unavailable (missing or closed)\"\n" +
+ " } \n" +
+ " }\n" +
+ " },\n" +
+ " \"body\": {\n" +
+ " \"description\" : \"A query to restrict the results specified with the Query DSL (optional)\"\n" +
+ " }\n" +
+ " }\n" +
+ "}\n";
+
+ private static final String REST_SPEC_GET_TEMPLATE_API = "{\n" +
+ " \"indices.get_template\": {\n" +
+ " \"documentation\": \"http://www.elasticsearch.org/guide/reference/api/admin-indices-templates/\",\n" +
+ " \"methods\": [\"GET\"],\n" +
+ " \"url\": {\n" +
+ " \"path\": \"/_template/{name}\",\n" +
+ " \"paths\": [\"/_template\", \"/_template/{name}\"],\n" +
+ " \"parts\": {\n" +
+ " \"name\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"required\" : false,\n" +
+ " \"description\" : \"The name of the template\"\n" +
+ " }\n" +
+ " },\n" +
+ " \"params\": {\n" +
+ " }\n" +
+ " },\n" +
+ " \"body\": null\n" +
+ " }\n" +
+ "}";
+
+ private static final String REST_SPEC_INDEX_API = "{\n" +
+ " \"index\": {\n" +
+ " \"documentation\": \"http://elasticsearch.org/guide/reference/api/index_/\",\n" +
+ " \"methods\": [\"POST\", \"PUT\"],\n" +
+ " \"url\": {\n" +
+ " \"path\": \"/{index}/{type}\",\n" +
+ " \"paths\": [\"/{index}/{type}\", \"/{index}/{type}/{id}\"],\n" +
+ " \"parts\": {\n" +
+ " \"id\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"description\" : \"Document ID\"\n" +
+ " },\n" +
+ " \"index\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"required\" : true,\n" +
+ " \"description\" : \"The name of the index\"\n" +
+ " },\n" +
+ " \"type\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"required\" : true,\n" +
+ " \"description\" : \"The type of the document\"\n" +
+ " }\n" +
+ " } ,\n" +
+ " \"params\": {\n" +
+ " \"consistency\": {\n" +
+ " \"type\" : \"enum\",\n" +
+ " \"options\" : [\"one\", \"quorum\", \"all\"],\n" +
+ " \"description\" : \"Explicit write consistency setting for the operation\"\n" +
+ " },\n" +
+ " \"op_type\": {\n" +
+ " \"type\" : \"enum\",\n" +
+ " \"options\" : [\"index\", \"create\"],\n" +
+ " \"default\" : \"index\",\n" +
+ " \"description\" : \"Explicit operation type\"\n" +
+ " },\n" +
+ " \"parent\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"description\" : \"ID of the parent document\"\n" +
+ " },\n" +
+ " \"refresh\": {\n" +
+ " \"type\" : \"boolean\",\n" +
+ " \"description\" : \"Refresh the index after performing the operation\"\n" +
+ " }\n" +
+ " }\n" +
+ " },\n" +
+ " \"body\": {\n" +
+ " \"description\" : \"The document\",\n" +
+ " \"required\" : true\n" +
+ " }\n" +
+ " }\n" +
+ "}\n";
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java
new file mode 100644
index 0000000..9a06328
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java
@@ -0,0 +1,536 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParser;
+import org.elasticsearch.test.rest.section.DoSection;
+import org.elasticsearch.test.rest.section.IsTrueAssertion;
+import org.elasticsearch.test.rest.section.MatchAssertion;
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.junit.After;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+public class RestTestParserTests extends ElasticsearchTestCase {
+
+ private XContentParser parser;
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ //make sure that we consumed the whole stream; XContentParser doesn't expose an isClosed method
+ //the next token can be null even in the middle of the document (e.g. on "---"), but not this many consecutive times
+ assertThat(parser.currentToken(), nullValue());
+ assertThat(parser.nextToken(), nullValue());
+ assertThat(parser.nextToken(), nullValue());
+ parser.close();
+ }
+
+ @Test
+ public void testParseTestSetupAndSections() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "setup:\n" +
+ " - do:\n" +
+ " indices.create:\n" +
+ " index: test_index\n" +
+ "\n" +
+ "---\n" +
+ "\"Get index mapping\":\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ "\n" +
+ " - match: {test_index.test_type.properties.text.type: string}\n" +
+ " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" +
+ "\n" +
+ "---\n" +
+ "\"Get type mapping - pre 1.0\":\n" +
+ "\n" +
+ " - skip:\n" +
+ " version: \"0.90.9 - 999\"\n" +
+ " reason: \"for newer versions the index name is always returned\"\n" +
+ "\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ "\n" +
+ " - match: {test_type.properties.text.type: string}\n" +
+ " - match: {test_type.properties.text.analyzer: whitespace}\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+ assertThat(restTestSuite.getSetupSection(), notNullValue());
+ assertThat(restTestSuite.getSetupSection().getSkipSection().isEmpty(), equalTo(true));
+
+ assertThat(restTestSuite.getSetupSection().getDoSections().size(), equalTo(1));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getApi(), equalTo("indices.create"));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().size(), equalTo(1));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().get("index"), equalTo("test_index"));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(2));
+
+ assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Get index mapping"));
+ assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(3));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class));
+ DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0);
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_mapping"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(1));
+ assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(MatchAssertion.class));
+ MatchAssertion matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(1);
+ assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("string"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(2), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(2);
+ assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.analyzer"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace"));
+
+ assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Get type mapping - pre 1.0"));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getReason(), equalTo("for newer versions the index name is always returned"));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getVersion(), equalTo("0.90.9 - 999"));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(0);
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_mapping"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
+ assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index"));
+ assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type"));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(1), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(1).getExecutableSections().get(1);
+ assertThat(matchAssertion.getField(), equalTo("test_type.properties.text.type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("string"));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(2), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(1).getExecutableSections().get(2);
+ assertThat(matchAssertion.getField(), equalTo("test_type.properties.text.analyzer"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace"));
+ }
+
+ @Test
+ public void testParseTestSetupAndSectionsSkipLastSection() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "setup:\n" +
+ " - do:\n" +
+ " indices.create:\n" +
+ " index: test_index\n" +
+ "\n" +
+ "---\n" +
+ "\"Get index mapping\":\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ "\n" +
+ " - match: {test_index.test_type.properties.text.type: string}\n" +
+ " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" +
+ "\n" +
+ "---\n" +
+ "\"Get type mapping - pre 1.0\":\n" +
+ "\n" +
+ " - skip:\n" +
+ " version: \"0.90.9 - 999\"\n" +
+ " reason: \"for newer versions the index name is always returned\"\n" +
+ "\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ "\n" +
+ " - match: {test_type.properties.text.type: string}\n" +
+ " - match: {test_type.properties.text.analyzer: whitespace}\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "1.0.0"));
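+ //version 1.0.0 falls within the "0.90.9 - 999" skip range, so the last section is parsed with no executable sections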
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+ assertThat(restTestSuite.getSetupSection(), notNullValue());
+ assertThat(restTestSuite.getSetupSection().getSkipSection().isEmpty(), equalTo(true));
+
+ assertThat(restTestSuite.getSetupSection().getDoSections().size(), equalTo(1));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getApi(), equalTo("indices.create"));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().size(), equalTo(1));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().get("index"), equalTo("test_index"));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(2));
+
+ assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Get index mapping"));
+ assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(3));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class));
+ DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0);
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_mapping"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(1));
+ assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(MatchAssertion.class));
+ MatchAssertion matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(1);
+ assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("string"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(2), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(2);
+ assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.analyzer"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace"));
+
+ assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Get type mapping - pre 1.0"));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getReason(), equalTo("for newer versions the index name is always returned"));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getVersion(), equalTo("0.90.9 - 999"));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(0));
+ }
+
+ @Test
+ public void testParseTestSetupAndSectionsSkipEntireFile() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "setup:\n" +
+ " - skip:\n" +
+ " version: \"0.90.3 - 0.90.6\"\n" +
+ " reason: \"test skip entire file\"\n" +
+ " - do:\n" +
+ " indices.create:\n" +
+ " index: test_index\n" +
+ "\n" +
+ "---\n" +
+ "\"Get index mapping\":\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ "\n" +
+ " - match: {test_index.test_type.properties.text.type: string}\n" +
+ " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" +
+ "\n" +
+ "---\n" +
+ "\"Get type mapping - pre 1.0\":\n" +
+ "\n" +
+ " - skip:\n" +
+ " version: \"0.90.9 - 999\"\n" +
+ " reason: \"for newer versions the index name is always returned\"\n" +
+ "\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ "\n" +
+ " - match: {test_type.properties.text.type: string}\n" +
+ " - match: {test_type.properties.text.analyzer: whitespace}\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
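+ //version 0.90.5 falls within the setup "0.90.3 - 0.90.6" skip range, so the whole file is skipped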
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+ assertThat(restTestSuite.getSetupSection(), notNullValue());
+
+ assertThat(restTestSuite.getSetupSection().getSkipSection().isEmpty(), equalTo(false));
+ assertThat(restTestSuite.getSetupSection().getSkipSection().getVersion(), equalTo("0.90.3 - 0.90.6"));
+ assertThat(restTestSuite.getSetupSection().getSkipSection().getReason(), equalTo("test skip entire file"));
+
+ assertThat(restTestSuite.getSetupSection().getDoSections().size(), equalTo(0));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(0));
+ }
+
+ @Test
+ public void testParseTestSetupAndSectionsSkipEntireFileNoDo() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "setup:\n" +
+ " - skip:\n" +
+ " version: \"0.90.3 - 0.90.6\"\n" +
+ " reason: \"test skip entire file\"\n" +
+ "\n" +
+ "---\n" +
+ "\"Get index mapping\":\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ "\n" +
+ " - match: {test_index.test_type.properties.text.type: string}\n" +
+ " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" +
+ "\n" +
+ "---\n" +
+ "\"Get type mapping - pre 1.0\":\n" +
+ "\n" +
+ " - skip:\n" +
+ " version: \"0.90.9 - 999\"\n" +
+ " reason: \"for newer versions the index name is always returned\"\n" +
+ "\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ "\n" +
+ " - match: {test_type.properties.text.type: string}\n" +
+ " - match: {test_type.properties.text.analyzer: whitespace}\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+ assertThat(restTestSuite.getSetupSection(), notNullValue());
+
+ assertThat(restTestSuite.getSetupSection().getSkipSection().isEmpty(), equalTo(false));
+ assertThat(restTestSuite.getSetupSection().getSkipSection().getVersion(), equalTo("0.90.3 - 0.90.6"));
+ assertThat(restTestSuite.getSetupSection().getSkipSection().getReason(), equalTo("test skip entire file"));
+
+ assertThat(restTestSuite.getSetupSection().getDoSections().size(), equalTo(0));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(0));
+ }
+
+ @Test
+ public void testParseTestSingleTestSection() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "---\n" +
+ "\"Index with ID\":\n" +
+ "\n" +
+ " - do:\n" +
+ " index:\n" +
+ " index: test-weird-index-中文\n" +
+ " type: weird.type\n" +
+ " id: 1\n" +
+ " body: { foo: bar }\n" +
+ "\n" +
+ " - is_true: ok\n" +
+ " - match: { _index: test-weird-index-中文 }\n" +
+ " - match: { _type: weird.type }\n" +
+ " - match: { _id: \"1\"}\n" +
+ " - match: { _version: 1}\n" +
+ "\n" +
+ " - do:\n" +
+ " get:\n" +
+ " index: test-weird-index-中文\n" +
+ " type: weird.type\n" +
+ " id: 1\n" +
+ "\n" +
+ " - match: { _index: test-weird-index-中文 }\n" +
+ " - match: { _type: weird.type }\n" +
+ " - match: { _id: \"1\"}\n" +
+ " - match: { _version: 1}\n" +
+ " - match: { _source: { foo: bar }}"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+
+ assertThat(restTestSuite.getSetupSection().isEmpty(), equalTo(true));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(1));
+
+ assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Index with ID"));
+ assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(12));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class));
+ DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("index"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(IsTrueAssertion.class));
+ IsTrueAssertion trueAssertion = (IsTrueAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(1);
+ assertThat(trueAssertion.getField(), equalTo("ok"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(2), instanceOf(MatchAssertion.class));
+ MatchAssertion matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(2);
+ assertThat(matchAssertion.getField(), equalTo("_index"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("test-weird-index-中文"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(3), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(3);
+ assertThat(matchAssertion.getField(), equalTo("_type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("weird.type"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(4), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(4);
+ assertThat(matchAssertion.getField(), equalTo("_id"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(5), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(5);
+ assertThat(matchAssertion.getField(), equalTo("_version"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(6), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(6);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("get"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(7), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(7);
+ assertThat(matchAssertion.getField(), equalTo("_index"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("test-weird-index-中文"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(8), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(8);
+ assertThat(matchAssertion.getField(), equalTo("_type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("weird.type"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(9), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(9);
+ assertThat(matchAssertion.getField(), equalTo("_id"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(10), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(10);
+ assertThat(matchAssertion.getField(), equalTo("_version"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(11), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(11);
+ assertThat(matchAssertion.getField(), equalTo("_source"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(Map.class));
+ assertThat(((Map) matchAssertion.getExpectedValue()).get("foo").toString(), equalTo("bar"));
+ }
+
+ @Test
+ public void testParseTestMultipleTestSections() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "---\n" +
+ "\"Missing document (partial doc)\":\n" +
+ "\n" +
+ " - do:\n" +
+ " catch: missing\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { doc: { foo: bar } }\n" +
+ "\n" +
+ " - do:\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { doc: { foo: bar } }\n" +
+ " ignore: 404\n" +
+ "\n" +
+ "---\n" +
+ "\"Missing document (script)\":\n" +
+ "\n" +
+ "\n" +
+ " - do:\n" +
+ " catch: missing\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body:\n" +
+ " script: \"ctx._source.foo = bar\"\n" +
+ " params: { bar: 'xxx' }\n" +
+ "\n" +
+ " - do:\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " ignore: 404\n" +
+ " body:\n" +
+ " script: \"ctx._source.foo = bar\"\n" +
+ " params: { bar: 'xxx' }\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+
+ assertThat(restTestSuite.getSetupSection().isEmpty(), equalTo(true));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(2));
+
+ assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Missing document (partial doc)"));
+ assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(2));
+
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class));
+ DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), equalTo("missing"));
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("update"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(1);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("update"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(4));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+
+ assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Missing document (script)"));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(2));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(1), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), equalTo("missing"));
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("update"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(1), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(1);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("update"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(4));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseTestDuplicateTestSections() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "---\n" +
+ "\"Missing document (script)\":\n" +
+ "\n" +
+ " - do:\n" +
+ " catch: missing\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { doc: { foo: bar } }\n" +
+ "\n" +
+ "---\n" +
+ "\"Missing document (script)\":\n" +
+ "\n" +
+ "\n" +
+ " - do:\n" +
+ " catch: missing\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body:\n" +
+ " script: \"ctx._source.foo = bar\"\n" +
+ " params: { bar: 'xxx' }\n" +
+ "\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ testParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java
new file mode 100644
index 0000000..fb9d7b9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.parser.SetSectionParser;
+import org.elasticsearch.test.rest.section.SetSection;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class SetSectionParserTests extends AbstractParserTests {
+
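+ //a set section maps a field from the last response (e.g. _id) to a stash key (e.g. id) for reuse in later requests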
+ @Test
+ public void testParseSetSectionSingleValue() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ _id: id }"
+ );
+
+ SetSectionParser setSectionParser = new SetSectionParser();
+
+ SetSection setSection = setSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(setSection, notNullValue());
+ assertThat(setSection.getStash(), notNullValue());
+ assertThat(setSection.getStash().size(), equalTo(1));
+ assertThat(setSection.getStash().get("_id"), equalTo("id"));
+ }
+
+ @Test
+ public void testParseSetSectionMultipleValues() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ _id: id, _type: type, _index: index }"
+ );
+
+ SetSectionParser setSectionParser = new SetSectionParser();
+
+ SetSection setSection = setSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(setSection, notNullValue());
+ assertThat(setSection.getStash(), notNullValue());
+ assertThat(setSection.getStash().size(), equalTo(3));
+ assertThat(setSection.getStash().get("_id"), equalTo("id"));
+ assertThat(setSection.getStash().get("_type"), equalTo("type"));
+ assertThat(setSection.getStash().get("_index"), equalTo("index"));
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseSetSectionNoValues() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ }"
+ );
+
+ SetSectionParser setSectionParser = new SetSectionParser();
+
+ setSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java
new file mode 100644
index 0000000..191bc3d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.parser.SetupSectionParser;
+import org.elasticsearch.test.rest.section.SetupSection;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
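+// Tests that SetupSectionParser collects the leading do sections and honors an optional skip section for the current version.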
+public class SetupSectionParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseSetupSection() throws Exception {
+
+ parser = YamlXContent.yamlXContent.createParser(
+ " - do:\n" +
+ " index1:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n" +
+ " - do:\n" +
+ " index2:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 2\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n"
+ );
+
+ SetupSectionParser setupSectionParser = new SetupSectionParser();
+ SetupSection setupSection = setupSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(setupSection, notNullValue());
+ assertThat(setupSection.getSkipSection().isEmpty(), equalTo(true));
+ assertThat(setupSection.getDoSections().size(), equalTo(2));
+ assertThat(setupSection.getDoSections().get(0).getApiCallSection().getApi(), equalTo("index1"));
+ assertThat(setupSection.getDoSections().get(1).getApiCallSection().getApi(), equalTo("index2"));
+ }
+
+ @Test
+ public void testParseSetupAndSkipSectionSkip() throws Exception {
+
+ parser = YamlXContent.yamlXContent.createParser(
+ " - skip:\n" +
+ " version: \"0.90.0 - 0.90.7\"\n" +
+ " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" +
+ " - do:\n" +
+ " index1:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n" +
+ " - do:\n" +
+ " index2:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 2\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n"
+ );
+
+ SetupSectionParser setupSectionParser = new SetupSectionParser();
+ SetupSection setupSection = setupSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(setupSection, notNullValue());
+ assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false));
+ assertThat(setupSection.getSkipSection(), notNullValue());
+ assertThat(setupSection.getSkipSection().getVersion(), equalTo("0.90.0 - 0.90.7"));
+ assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
+ assertThat(setupSection.getDoSections().size(), equalTo(0));
+ }
+
+ @Test
+ public void testParseSetupAndSkipSectionNoSkip() throws Exception {
+
+ parser = YamlXContent.yamlXContent.createParser(
+ " - skip:\n" +
+ " version: \"0.90.0 - 0.90.7\"\n" +
+ " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" +
+ " - do:\n" +
+ " index1:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n" +
+ " - do:\n" +
+ " index2:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 2\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n"
+ );
+
+ SetupSectionParser setupSectionParser = new SetupSectionParser();
+ SetupSection setupSection = setupSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.8"));
+
+ assertThat(setupSection, notNullValue());
+ assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false));
+ assertThat(setupSection.getSkipSection(), notNullValue());
+ assertThat(setupSection.getSkipSection().getVersion(), equalTo("0.90.0 - 0.90.7"));
+ assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
+ assertThat(setupSection.getDoSections().size(), equalTo(2));
+ assertThat(setupSection.getDoSections().get(0).getApiCallSection().getApi(), equalTo("index1"));
+ assertThat(setupSection.getDoSections().get(1).getApiCallSection().getApi(), equalTo("index2"));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java
new file mode 100644
index 0000000..4209e90
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.parser.SkipSectionParser;
+import org.elasticsearch.test.rest.section.SkipSection;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
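+// Tests that SkipSectionParser accepts either a version range with a reason or a feature list, but rejects combining them,
+// a version range without a reason, and a section with neither.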
+public class SkipSectionParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseSkipSectionVersionNoFeature() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "version: \"0 - 0.90.2\"\n" +
+ "reason: Delete ignores the parent param"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+
+ SkipSection skipSection = skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(skipSection, notNullValue());
+ assertThat(skipSection.getVersion(), equalTo("0 - 0.90.2"));
+ assertThat(skipSection.getFeatures().size(), equalTo(0));
+ assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param"));
+ }
+
+ @Test
+ public void testParseSkipSectionFeatureNoVersion() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "features: regex"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+
+ SkipSection skipSection = skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(skipSection, notNullValue());
+ assertThat(skipSection.getVersion(), nullValue());
+ assertThat(skipSection.getFeatures().size(), equalTo(1));
+ assertThat(skipSection.getFeatures().get(0), equalTo("regex"));
+ assertThat(skipSection.getReason(), nullValue());
+ }
+
+ @Test
+ public void testParseSkipSectionFeaturesNoVersion() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "features: [regex1,regex2,regex3]"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+
+ SkipSection skipSection = skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(skipSection, notNullValue());
+ assertThat(skipSection.getVersion(), nullValue());
+ assertThat(skipSection.getFeatures().size(), equalTo(3));
+ assertThat(skipSection.getFeatures().get(0), equalTo("regex1"));
+ assertThat(skipSection.getFeatures().get(1), equalTo("regex2"));
+ assertThat(skipSection.getFeatures().get(2), equalTo("regex3"));
+ assertThat(skipSection.getReason(), nullValue());
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseSkipSectionBothFeatureAndVersion() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "version: \"0 - 0.90.2\"\n" +
+ "features: regex\n" +
+ "reason: Delete ignores the parent param"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+
+ skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseSkipSectionNoReason() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "version: \"0 - 0.90.2\"\n"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+ skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseSkipSectionNoVersionNorFeature() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "reason: Delete ignores the parent param\n"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+ skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java
new file mode 100644
index 0000000..f98f67f
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.RestTestSectionParser;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.section.*;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
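+// Tests that RestTestSectionParser turns a named YAML test section into executable do/set/assertion sections,
+// honoring any leading skip section.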
+public class TestSectionParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseTestSectionWithDoSection() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "\"First test section\": \n" +
+ " - do :\n" +
+ " catch: missing\n" +
+ " indices.get_warmer:\n" +
+ " index: test_index\n" +
+ " name: test_warmer"
+ );
+
+ RestTestSectionParser testSectionParser = new RestTestSectionParser();
+ TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(testSection, notNullValue());
+ assertThat(testSection.getName(), equalTo("First test section"));
+ assertThat(testSection.getSkipSection(), equalTo(SkipSection.EMPTY));
+ assertThat(testSection.getExecutableSections().size(), equalTo(1));
+ DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), equalTo("missing"));
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_warmer"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ }
+
+ @Test
+ public void testParseTestSectionWithDoSetAndSkipSectionsSkip() throws Exception {
+ String yaml =
+ "\"First test section\": \n" +
+ " - skip:\n" +
+ " version: \"0.90.0 - 0.90.7\"\n" +
+ " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" +
+ " - do :\n" +
+ " catch: missing\n" +
+ " indices.get_warmer:\n" +
+ " index: test_index\n" +
+ " name: test_warmer\n" +
+ " - set: {_scroll_id: scroll_id}";
+
+ RestTestSectionParser testSectionParser = new RestTestSectionParser();
+ parser = YamlXContent.yamlXContent.createParser(yaml);
+ TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.7"));
+
+ assertThat(testSection, notNullValue());
+ assertThat(testSection.getName(), equalTo("First test section"));
+ assertThat(testSection.getSkipSection(), notNullValue());
+ assertThat(testSection.getSkipSection().getVersion(), equalTo("0.90.0 - 0.90.7"));
+ assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
+        // the skip section matches the current version, so the remaining sections are not parsed
+ assertThat(testSection.getExecutableSections().size(), equalTo(0));
+ }
+
+ @Test
+ public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exception {
+ String yaml =
+ "\"First test section\": \n" +
+ " - skip:\n" +
+ " version: \"0.90.0 - 0.90.7\"\n" +
+ " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" +
+ " - do :\n" +
+ " catch: missing\n" +
+ " indices.get_warmer:\n" +
+ " index: test_index\n" +
+ " name: test_warmer\n" +
+ " - set: {_scroll_id: scroll_id}";
+
+ RestTestSectionParser testSectionParser = new RestTestSectionParser();
+ parser = YamlXContent.yamlXContent.createParser(yaml);
+ TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.8"));
+
+ assertThat(testSection, notNullValue());
+ assertThat(testSection.getName(), equalTo("First test section"));
+ assertThat(testSection.getSkipSection(), notNullValue());
+ assertThat(testSection.getSkipSection().getVersion(), equalTo("0.90.0 - 0.90.7"));
+ assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
+ assertThat(testSection.getExecutableSections().size(), equalTo(2));
+ DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), equalTo("missing"));
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_warmer"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ SetSection setSection = (SetSection) testSection.getExecutableSections().get(1);
+ assertThat(setSection.getStash().size(), equalTo(1));
+ assertThat(setSection.getStash().get("_scroll_id"), equalTo("scroll_id"));
+ }
+
+ @Test
+ public void testParseTestSectionWithMultipleDoSections() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "\"Basic\":\n" +
+ "\n" +
+ " - do:\n" +
+ " index:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 中文\n" +
+ " body: { \"foo\": \"Hello: 中文\" }\n" +
+ " - do:\n" +
+ " get:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 中文"
+ );
+
+ RestTestSectionParser testSectionParser = new RestTestSectionParser();
+ TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(testSection, notNullValue());
+ assertThat(testSection.getName(), equalTo("Basic"));
+ assertThat(testSection.getSkipSection(), equalTo(SkipSection.EMPTY));
+ assertThat(testSection.getExecutableSections().size(), equalTo(2));
+ DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("index"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ doSection = (DoSection)testSection.getExecutableSections().get(1);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("get"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ }
+
+ @Test
+ public void testParseTestSectionWithDoSectionsAndAssertions() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "\"Basic\":\n" +
+ "\n" +
+ " - do:\n" +
+ " index:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 中文\n" +
+ " body: { \"foo\": \"Hello: 中文\" }\n" +
+ "\n" +
+ " - do:\n" +
+ " get:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 中文\n" +
+ "\n" +
+ " - match: { _index: test_1 }\n" +
+ " - is_true: _source\n" +
+ " - match: { _source: { foo: \"Hello: 中文\" } }\n" +
+ "\n" +
+ " - do:\n" +
+ " get:\n" +
+ " index: test_1\n" +
+ " id: 中文\n" +
+ "\n" +
+ " - length: { _index: 6 }\n" +
+ " - is_false: whatever\n" +
+ " - gt: { size: 5 }\n" +
+ " - lt: { size: 10 }"
+ );
+
+ RestTestSectionParser testSectionParser = new RestTestSectionParser();
+ TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+
+ assertThat(testSection, notNullValue());
+ assertThat(testSection.getName(), equalTo("Basic"));
+ assertThat(testSection.getSkipSection(), equalTo(SkipSection.EMPTY));
+ assertThat(testSection.getExecutableSections().size(), equalTo(10));
+
+ DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("index"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+
+ doSection = (DoSection)testSection.getExecutableSections().get(1);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("get"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+
+ MatchAssertion matchAssertion = (MatchAssertion)testSection.getExecutableSections().get(2);
+ assertThat(matchAssertion.getField(), equalTo("_index"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("test_1"));
+
+ IsTrueAssertion trueAssertion = (IsTrueAssertion)testSection.getExecutableSections().get(3);
+ assertThat(trueAssertion.getField(), equalTo("_source"));
+
+ matchAssertion = (MatchAssertion)testSection.getExecutableSections().get(4);
+ assertThat(matchAssertion.getField(), equalTo("_source"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(Map.class));
+ Map map = (Map) matchAssertion.getExpectedValue();
+ assertThat(map.size(), equalTo(1));
+ assertThat(map.get("foo").toString(), equalTo("Hello: 中文"));
+
+ doSection = (DoSection)testSection.getExecutableSections().get(5);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("get"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+
+ LengthAssertion lengthAssertion = (LengthAssertion) testSection.getExecutableSections().get(6);
+ assertThat(lengthAssertion.getField(), equalTo("_index"));
+ assertThat(lengthAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) lengthAssertion.getExpectedValue(), equalTo(6));
+
+ IsFalseAssertion falseAssertion = (IsFalseAssertion)testSection.getExecutableSections().get(7);
+ assertThat(falseAssertion.getField(), equalTo("whatever"));
+
+ GreaterThanAssertion greaterThanAssertion = (GreaterThanAssertion) testSection.getExecutableSections().get(8);
+ assertThat(greaterThanAssertion.getField(), equalTo("size"));
+ assertThat(greaterThanAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) greaterThanAssertion.getExpectedValue(), equalTo(5));
+
+ LessThanAssertion lessThanAssertion = (LessThanAssertion) testSection.getExecutableSections().get(9);
+ assertThat(lessThanAssertion.getField(), equalTo("size"));
+ assertThat(lessThanAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) lessThanAssertion.getExpectedValue(), equalTo(10));
+ }
+
+ @Test
+ public void testSmallSection() throws Exception {
+
+ parser = YamlXContent.yamlXContent.createParser(
+ "\"node_info test\":\n" +
+ " - do:\n" +
+ " cluster.node_info: {}\n" +
+ " \n" +
+ " - is_true: nodes\n" +
+ " - is_true: cluster_name\n");
+ RestTestSectionParser testSectionParser = new RestTestSectionParser();
+ TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser, "0.90.5"));
+ assertThat(testSection, notNullValue());
+ assertThat(testSection.getName(), equalTo("node_info test"));
+ assertThat(testSection.getExecutableSections().size(), equalTo(3));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/rest/test/VersionUtilsTests.java b/src/test/java/org/elasticsearch/test/rest/test/VersionUtilsTests.java
new file mode 100644
index 0000000..3960012
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/rest/test/VersionUtilsTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.test.rest.support.VersionUtils.parseVersionNumber;
+import static org.elasticsearch.test.rest.support.VersionUtils.skipCurrentVersion;
+import static org.hamcrest.Matchers.*;
+
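+// Tests version-number parsing (a trailing qualifier such as Beta1 or RC1 after x.y.z is ignored) and version-range skip checks.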
+public class VersionUtilsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testParseVersionNumber() {
+
+ int[] versionNumber = parseVersionNumber("0.90.6");
+ assertThat(versionNumber.length, equalTo(3));
+ assertThat(versionNumber[0], equalTo(0));
+ assertThat(versionNumber[1], equalTo(90));
+ assertThat(versionNumber[2], equalTo(6));
+
+ versionNumber = parseVersionNumber("0.90.999");
+ assertThat(versionNumber.length, equalTo(3));
+ assertThat(versionNumber[0], equalTo(0));
+ assertThat(versionNumber[1], equalTo(90));
+ assertThat(versionNumber[2], equalTo(999));
+
+ versionNumber = parseVersionNumber("0.20.11");
+ assertThat(versionNumber.length, equalTo(3));
+ assertThat(versionNumber[0], equalTo(0));
+ assertThat(versionNumber[1], equalTo(20));
+ assertThat(versionNumber[2], equalTo(11));
+
+ versionNumber = parseVersionNumber("1.0.0.Beta1");
+ assertThat(versionNumber.length, equalTo(3));
+ assertThat(versionNumber[0], equalTo(1));
+ assertThat(versionNumber[1], equalTo(0));
+ assertThat(versionNumber[2], equalTo(0));
+
+ versionNumber = parseVersionNumber("1.0.0.RC1");
+ assertThat(versionNumber.length, equalTo(3));
+ assertThat(versionNumber[0], equalTo(1));
+ assertThat(versionNumber[1], equalTo(0));
+ assertThat(versionNumber[2], equalTo(0));
+
+ versionNumber = parseVersionNumber("1.0.0");
+ assertThat(versionNumber.length, equalTo(3));
+ assertThat(versionNumber[0], equalTo(1));
+ assertThat(versionNumber[1], equalTo(0));
+ assertThat(versionNumber[2], equalTo(0));
+
+ versionNumber = parseVersionNumber("1.0");
+ assertThat(versionNumber.length, equalTo(2));
+ assertThat(versionNumber[0], equalTo(1));
+ assertThat(versionNumber[1], equalTo(0));
+
+ versionNumber = parseVersionNumber("999");
+ assertThat(versionNumber.length, equalTo(1));
+ assertThat(versionNumber[0], equalTo(999));
+
+ versionNumber = parseVersionNumber("0");
+ assertThat(versionNumber.length, equalTo(1));
+ assertThat(versionNumber[0], equalTo(0));
+
+ try {
+ parseVersionNumber("1.0.Beta1");
+ fail("parseVersionNumber should have thrown an error");
+        } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString("version is not a number"));
+ assertThat(e.getCause(), instanceOf(NumberFormatException.class));
+ }
+ }
+
+ @Test
+ public void testSkipCurrentVersion() {
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "0.90.2"), equalTo(true));
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "0.90.3"), equalTo(true));
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "0.90.6"), equalTo(true));
+
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "0.20.10"), equalTo(false));
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "0.90.1"), equalTo(false));
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "0.90.7"), equalTo(false));
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.6", "1.0.0"), equalTo(false));
+
+ assertThat(skipCurrentVersion(" 0.90.2 - 0.90.999 ", "0.90.15"), equalTo(true));
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.999", "1.0.0"), equalTo(false));
+
+ assertThat(skipCurrentVersion("0 - 999", "0.90.15"), equalTo(true));
+ assertThat(skipCurrentVersion("0 - 999", "0.20.1"), equalTo(true));
+ assertThat(skipCurrentVersion("0 - 999", "1.0.0"), equalTo(true));
+
+ assertThat(skipCurrentVersion("0.90.9 - 999", "1.0.0"), equalTo(true));
+ assertThat(skipCurrentVersion("0.90.9 - 999", "0.90.8"), equalTo(false));
+
+ try {
+ assertThat(skipCurrentVersion("0.90.2 - 0.90.999 - 1.0.0", "1.0.0"), equalTo(false));
+ fail("skipCurrentVersion should have thrown an error");
+        } catch (IllegalArgumentException e) {
+            assertThat(e.getMessage(), containsString("too many skip versions found"));
+        }
+    }
+}
diff --git a/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java b/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java
new file mode 100644
index 0000000..e9df739
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MMapDirectory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.cache.memory.ByteBufferCache;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.index.store.fs.FsDirectoryService;
+import org.elasticsearch.index.store.fs.MmapFsDirectoryService;
+import org.elasticsearch.index.store.fs.NioFsDirectoryService;
+import org.elasticsearch.index.store.fs.SimpleFsDirectoryService;
+import org.elasticsearch.index.store.memory.ByteBufferDirectoryService;
+import org.elasticsearch.index.store.ram.RamDirectoryService;
+import org.elasticsearch.test.TestCluster;
+
+import java.io.IOException;
+import java.util.Random;
+import java.util.Set;
+
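+// Wraps test directories in Lucene's MockDirectoryWrapper, with exception injection, throttling and
+// check-index-on-close behavior driven by index settings.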
+public class MockDirectoryHelper {
+ public static final String RANDOM_IO_EXCEPTION_RATE = "index.store.mock.random.io_exception_rate";
+ public static final String RANDOM_IO_EXCEPTION_RATE_ON_OPEN = "index.store.mock.random.io_exception_rate_on_open";
+ public static final String RANDOM_THROTTLE = "index.store.mock.random.throttle";
+ public static final String CHECK_INDEX_ON_CLOSE = "index.store.mock.check_index_on_close";
+ public static final String RANDOM_PREVENT_DOUBLE_WRITE = "index.store.mock.random.prevent_double_write";
+ public static final String RANDOM_NO_DELETE_OPEN_FILE = "index.store.mock.random.no_delete_open_file";
+    public static final String RANDOM_FAIL_ON_CLOSE = "index.store.mock.random.fail_on_close";
+
+ public static final Set<ElasticsearchMockDirectoryWrapper> wrappers = ConcurrentCollections.newConcurrentSet();
+
+ private final Random random;
+ private final double randomIOExceptionRate;
+ private final double randomIOExceptionRateOnOpen;
+ private final Throttling throttle;
+ private final boolean checkIndexOnClose;
+ private final Settings indexSettings;
+ private final ShardId shardId;
+ private final boolean preventDoubleWrite;
+ private final boolean noDeleteOpenFile;
+ private final ESLogger logger;
+ private final boolean failOnClose;
+
+ public MockDirectoryHelper(ShardId shardId, Settings indexSettings, ESLogger logger) {
+        final long seed = indexSettings.getAsLong(TestCluster.SETTING_INDEX_SEED, 0L);
+ random = new Random(seed);
+ randomIOExceptionRate = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE, 0.0d);
+ randomIOExceptionRateOnOpen = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0.0d);
+ preventDoubleWrite = indexSettings.getAsBoolean(RANDOM_PREVENT_DOUBLE_WRITE, true); // true is default in MDW
+        noDeleteOpenFile = indexSettings.getAsBoolean(RANDOM_NO_DELETE_OPEN_FILE, random.nextBoolean()); // MDW defaults to true; randomized here unless set explicitly
+ random.nextInt(shardId.getId() + 1); // some randomness per shard
+ throttle = Throttling.valueOf(indexSettings.get(RANDOM_THROTTLE, random.nextDouble() < 0.1 ? "SOMETIMES" : "NEVER"));
+        checkIndexOnClose = indexSettings.getAsBoolean(CHECK_INDEX_ON_CLOSE, false); // we can't do this by default since it might close the index input that we still read from in a pending fetch phase.
+ failOnClose = indexSettings.getAsBoolean(RANDOM_FAIL_ON_CLOSE, false);
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("Using MockDirWrapper with seed [{}] throttle: [{}] checkIndexOnClose: [{}]", SeedUtils.formatSeed(seed),
+ throttle, checkIndexOnClose);
+ }
+ this.indexSettings = indexSettings;
+ this.shardId = shardId;
+ this.logger = logger;
+ }
+
+ public Directory wrap(Directory dir) {
+ final ElasticsearchMockDirectoryWrapper w = new ElasticsearchMockDirectoryWrapper(random, dir, logger, failOnClose);
+ w.setRandomIOExceptionRate(randomIOExceptionRate);
+ w.setRandomIOExceptionRateOnOpen(randomIOExceptionRateOnOpen);
+ w.setThrottling(throttle);
+ w.setCheckIndexOnClose(checkIndexOnClose);
+ w.setPreventDoubleWrite(preventDoubleWrite);
+ w.setNoDeleteOpenFile(noDeleteOpenFile);
+ wrappers.add(w);
+ return w;
+ }
+
+ public Directory[] wrapAllInplace(Directory[] dirs) {
+ for (int i = 0; i < dirs.length; i++) {
+ dirs[i] = wrap(dirs[i]);
+ }
+ return dirs;
+ }
+
+    public FsDirectoryService randomDirectoryService(IndexStore indexStore) {
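+        // On 64-bit Windows/Solaris with mmap unmap support, always use mmap; on other Windows setups
+        // fall back to SimpleFS; elsewhere pick an FS implementation at random.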
+ if ((Constants.WINDOWS || Constants.SUN_OS) && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
+ return new MmapFsDirectoryService(shardId, indexSettings, indexStore);
+ } else if (Constants.WINDOWS) {
+ return new SimpleFsDirectoryService(shardId, indexSettings, indexStore);
+ }
+ switch (random.nextInt(3)) {
+ case 1:
+ return new MmapFsDirectoryService(shardId, indexSettings, indexStore);
+ case 0:
+ return new SimpleFsDirectoryService(shardId, indexSettings, indexStore);
+ default:
+ return new NioFsDirectoryService(shardId, indexSettings, indexStore);
+ }
+ }
+
+    public DirectoryService randomRamDirectoryService(ByteBufferCache byteBufferCache) {
+ switch (random.nextInt(2)) {
+ case 0:
+ return new RamDirectoryService(shardId, indexSettings);
+ default:
+ return new ByteBufferDirectoryService(shardId, indexSettings, byteBufferCache);
+ }
+    }
+
+ public static final class ElasticsearchMockDirectoryWrapper extends MockDirectoryWrapper {
+
+ private final ESLogger logger;
+ private final boolean failOnClose;
+
+ public ElasticsearchMockDirectoryWrapper(Random random, Directory delegate, ESLogger logger, boolean failOnClose) {
+ super(random, delegate);
+ this.logger = logger;
+ this.failOnClose = failOnClose;
+ }
+
+ @Override
+ public void close() throws IOException {
+ try {
+ super.close();
+ } catch (RuntimeException ex) {
+ if (failOnClose) {
+ throw ex;
+ }
+ // we catch the exception on close to properly close shards even if there are open files
+ // the test framework will call closeWithRuntimeException after the test exits to fail
+ // on unclosed files.
+ logger.debug("MockDirectoryWrapper#close() threw exception", ex);
+ }
+ }
+
+ public void closeWithRuntimeException() throws IOException {
+ super.close(); // force fail if open files etc. called in tear down of ElasticsearchIntegrationTest
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java
new file mode 100644
index 0000000..69a6dc0
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.LockFactory;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.index.store.fs.FsDirectoryService;
+
+import java.io.File;
+import java.io.IOException;
+
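+// FsDirectoryService that delegates to a randomly chosen FS implementation and wraps every directory it builds in the mock wrapper.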
+public class MockFSDirectoryService extends FsDirectoryService {
+
+ private final MockDirectoryHelper helper;
+ private FsDirectoryService delegateService;
+
+ @Inject
+ public MockFSDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore) {
+ super(shardId, indexSettings, indexStore);
+ helper = new MockDirectoryHelper(shardId, indexSettings, logger);
+        delegateService = helper.randomDirectoryService(indexStore);
+ }
+
+ @Override
+ public Directory[] build() throws IOException {
+ return helper.wrapAllInplace(delegateService.build());
+ }
+
+ @Override
+ protected synchronized FSDirectory newFSDirectory(File location, LockFactory lockFactory) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java b/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java
new file mode 100644
index 0000000..8005a62
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.fs.FsIndexStore;
+import org.elasticsearch.indices.store.IndicesStore;
+
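+// FsIndexStore whose shards obtain their directories from MockFSDirectoryService.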
+public class MockFSIndexStore extends FsIndexStore {
+
+ @Inject
+ public MockFSIndexStore(Index index, Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) {
+ super(index, indexSettings, indexService, indicesStore, nodeEnv);
+ }
+
+ @Override
+ public Class<? extends DirectoryService> shardDirectory() {
+ return MockFSDirectoryService.class;
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/store/MockFSIndexStoreModule.java b/src/test/java/org/elasticsearch/test/store/MockFSIndexStoreModule.java
new file mode 100644
index 0000000..c4f9d20
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockFSIndexStoreModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.store.IndexStore;
+
+public class MockFSIndexStoreModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(IndexStore.class).to(MockFSIndexStore.class).asEagerSingleton();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/store/MockRamDirectoryService.java b/src/test/java/org/elasticsearch/test/store/MockRamDirectoryService.java
new file mode 100644
index 0000000..a2a6aa4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockRamDirectoryService.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.cache.memory.ByteBufferCache;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.DirectoryService;
+
+import java.io.IOException;
+
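+// DirectoryService that delegates to a randomly chosen RAM-backed implementation and wraps the directories it builds in the mock wrapper.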
+public class MockRamDirectoryService extends AbstractIndexShardComponent implements DirectoryService {
+
+ private final MockDirectoryHelper helper;
+ private final DirectoryService delegateService;
+
+ @Inject
+ public MockRamDirectoryService(ShardId shardId, Settings indexSettings, ByteBufferCache byteBufferCache) {
+ super(shardId, indexSettings);
+ helper = new MockDirectoryHelper(shardId, indexSettings, logger);
+        delegateService = helper.randomRamDirectoryService(byteBufferCache);
+ }
+
+ @Override
+ public Directory[] build() throws IOException {
+ return helper.wrapAllInplace(delegateService.build());
+ }
+
+ @Override
+ public long throttleTimeInNanos() {
+ return delegateService.throttleTimeInNanos();
+ }
+
+ @Override
+ public void renameFile(Directory dir, String from, String to) throws IOException {
+ delegateService.renameFile(dir, from, to);
+ }
+
+ @Override
+ public void fullDelete(Directory dir) throws IOException {
+ delegateService.fullDelete(dir);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/store/MockRamIndexStore.java b/src/test/java/org/elasticsearch/test/store/MockRamIndexStore.java
new file mode 100644
index 0000000..51aacad
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockRamIndexStore.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.service.IndexService;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.support.AbstractIndexStore;
+import org.elasticsearch.indices.store.IndicesStore;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.monitor.jvm.JvmStats;
+
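+// Non-persistent IndexStore whose shards use MockRamDirectoryService; backing-store sizes are reported from JVM heap stats.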
+public class MockRamIndexStore extends AbstractIndexStore {
+
+ @Inject
+ public MockRamIndexStore(Index index, Settings indexSettings, IndexService indexService, IndicesStore indicesStore) {
+ super(index, indexSettings, indexService, indicesStore);
+ }
+
+ @Override
+ public boolean persistent() {
+ return false;
+ }
+
+ @Override
+ public Class<? extends DirectoryService> shardDirectory() {
+ return MockRamDirectoryService.class;
+ }
+
+ @Override
+ public ByteSizeValue backingStoreTotalSpace() {
+ return JvmInfo.jvmInfo().getMem().heapMax();
+ }
+
+ @Override
+ public ByteSizeValue backingStoreFreeSpace() {
+ return JvmStats.jvmStats().getMem().heapUsed();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/store/MockRamIndexStoreModule.java b/src/test/java/org/elasticsearch/test/store/MockRamIndexStoreModule.java
new file mode 100644
index 0000000..b3bdccb
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/store/MockRamIndexStoreModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.store.IndexStore;
+
+public class MockRamIndexStoreModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(IndexStore.class).to(MockRamIndexStore.class).asEagerSingleton();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransport.java b/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
new file mode 100644
index 0000000..159b427
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.transport;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.TestCluster;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.local.LocalTransport;
+
+import java.io.IOException;
+import java.util.Random;
+
+/**
+ * A LocalTransport that asserts every request and response is serializable across a random version before delegating.
+ */
+public class AssertingLocalTransport extends LocalTransport {
+ private final Random random;
+
+ @Inject
+ public AssertingLocalTransport(Settings settings, ThreadPool threadPool, Version version) {
+ super(settings, threadPool, version);
+        final long seed = settings.getAsLong(TestCluster.SETTING_INDEX_SEED, 0L);
+ random = new Random(seed);
+ }
+
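+    // note: the method name spelling follows the LocalTransport method being overridden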
+ @Override
+ protected void handleParsedRespone(final TransportResponse response, final TransportResponseHandler handler) {
+ ElasticsearchAssertions.assertVersionSerializable(ElasticsearchTestCase.randomVersion(random), response);
+ super.handleParsedRespone(response, handler);
+ }
+
+ @Override
+ public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ ElasticsearchAssertions.assertVersionSerializable(ElasticsearchTestCase.randomVersion(random), request);
+ super.sendRequest(node, requestId, action, request, options);
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransportModule.java b/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransportModule.java
new file mode 100644
index 0000000..47c8ee1
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransportModule.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.transport;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.transport.Transport;
+
+/**
+ * Binds AssertingLocalTransport as the node's Transport implementation.
+ */
+public class AssertingLocalTransportModule extends AbstractModule {
+
+ private final Settings settings;
+
+ public AssertingLocalTransportModule(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ protected void configure() {
+ bind(AssertingLocalTransport.class).asEagerSingleton();
+ bind(Transport.class).to(AssertingLocalTransport.class).asEagerSingleton();
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolTests.java b/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolTests.java
new file mode 100644
index 0000000..bd2553d
--- /dev/null
+++ b/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolTests.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.threadpool.ThreadPool.Names;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.*;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for updating thread pool settings on a running cluster.
+ */
+@ClusterScope(scope=Scope.TEST, numNodes=2)
+public class SimpleThreadPoolTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.settingsBuilder().put("threadpool.search.type", "cached").put(super.nodeSettings(nodeOrdinal)).build();
+ }
+
+ @Test(timeout = 20000)
+ public void testUpdatingThreadPoolSettings() throws Exception {
+ ThreadPool threadPool = cluster().getInstance(ThreadPool.class);
+ // Check that settings are changed
+ assertThat(((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(5L));
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.keep_alive", "10m").build()).execute().actionGet();
+ assertThat(((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
+
+ // Make sure that threads continue executing when executor is replaced
+ final CyclicBarrier barrier = new CyclicBarrier(2);
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.executor(Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ barrier.await();
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ } catch (BrokenBarrierException ex) {
+ //
+ }
+ }
+ });
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.type", "fixed").build()).execute().actionGet();
+ assertThat(threadPool.executor(Names.SEARCH), not(sameInstance(oldExecutor)));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isShutdown(), equalTo(true));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isTerminating(), equalTo(true));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isTerminated(), equalTo(false));
+ barrier.await();
+
+ // Make sure that new thread executor is functional
+ threadPool.executor(Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ barrier.await();
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ } catch (BrokenBarrierException ex) {
+ //
+ }
+ }
+ });
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.type", "fixed").build()).execute().actionGet();
+ barrier.await();
+        Thread.sleep(200); // brief pause before reading node info
+
+ // Check that node info is correct
+ NodesInfoResponse nodesInfoResponse = client().admin().cluster().prepareNodesInfo().all().execute().actionGet();
+ for (int i = 0; i < 2; i++) {
+ NodeInfo nodeInfo = nodesInfoResponse.getNodes()[i];
+ boolean found = false;
+ for (ThreadPool.Info info : nodeInfo.getThreadPool()) {
+ if (info.getName().equals(Names.SEARCH)) {
+ assertThat(info.getType(), equalTo("fixed"));
+ found = true;
+ break;
+ }
+ }
+ assertThat(found, equalTo(true));
+
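+ // Round-trip the pool info through JSON; the parsed map is not asserted on,
+ // the serialization just must not throw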
+ Map<String, Object> poolMap = getPoolSettingsThroughJson(nodeInfo.getThreadPool(), Names.SEARCH);
+ }
+ }
+
+ private Map<String, Object> getPoolSettingsThroughJson(ThreadPoolInfo info, String poolName) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ info.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ builder.close();
+ XContentParser parser = JsonXContent.jsonXContent.createParser(builder.string());
+ Map<String, Object> poolsMap = parser.mapAndClose();
+ return (Map<String, Object>) ((Map<String, Object>) poolsMap.get("thread_pool")).get(poolName);
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java
new file mode 100644
index 0000000..04d99ae
--- /dev/null
+++ b/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import com.google.common.util.concurrent.ListeningExecutorService;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool.Names;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Unit tests for {@link ThreadPool#updateSettings} covering the cached, fixed
+ * and scaling executor types.
+ */
+public class UpdateThreadPoolSettingsTests extends ElasticsearchTestCase {
+
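+ // Returns the ThreadPool.Info entry for the given pool name, or null if absent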
+ private ThreadPool.Info info(ThreadPool threadPool, String name) {
+ for (ThreadPool.Info info : threadPool.info()) {
+ if (info.getName().equals(name)) {
+ return info;
+ }
+ }
+ return null;
+ }
+
+ @Test
+ public void testCachedExecutorType() {
+ ThreadPool threadPool = new ThreadPool(ImmutableSettings.settingsBuilder().put("threadpool.search.type", "cached").build(), null);
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(5L));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+
+ // Replace with different type
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "same").build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("same"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(ListeningExecutorService.class));
+
+ // Replace with different type again
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.type", "scaling")
+ .put("threadpool.search.keep_alive", "10m")
+ .build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(1));
+ // Make sure keep alive value changed
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
+
+ // Put old type back
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "cached").build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
+ // Make sure keep alive value reused
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+
+ // Change keep alive
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.keep_alive", "1m").build());
+ // Make sure keep alive value changed
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(1L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
+ // Make sure executor didn't change
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
+ assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
+
+ // Set the same keep alive
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.keep_alive", "1m").build());
+ // Make sure keep alive value didn't change
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(1L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
+ // Make sure executor didn't change
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
+ assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
+
+ threadPool.shutdown();
+ }
+
+ @Test
+ public void testFixedExecutorType() {
+ ThreadPool threadPool = new ThreadPool(settingsBuilder().put("threadpool.search.type", "fixed").build(), null);
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+
+ // Replace with different type
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.type", "scaling")
+ .put("threadpool.search.keep_alive", "10m")
+ .put("threadpool.search.min", "2")
+ .put("threadpool.search.size", "15")
+ .build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(2));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(2));
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
+ // Make sure keep alive value changed
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
+
+ // Put old type back
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.type", "fixed")
+ .build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("fixed"));
+ // Fixed pools have no keep alive, so the value must be null
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive(), nullValue());
+ // Make sure the previous pool size was reused; for fixed pools min == max == size
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(15));
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(15));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
+
+ // Change size
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.size", "10").build());
+ // Make sure size values changed
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(10));
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(10));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(10));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(10));
+ // Make sure executor didn't change
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("fixed"));
+ assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
+
+ // Change queue capacity
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.queue", "500")
+ .build());
+
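+ // No assertions here: updating the queue capacity just needs to apply cleanly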
+ threadPool.shutdown();
+ }
+
+
+ @Test
+ public void testScalingExecutorType() {
+ ThreadPool threadPool = new ThreadPool(
+ settingsBuilder().put("threadpool.search.type", "scaling").put("threadpool.search.size", 10).build(), null);
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(1));
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(10));
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(5L));
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+
+ // Change settings that don't require replacing the pool
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.type", "scaling")
+ .put("threadpool.search.keep_alive", "10m")
+ .put("threadpool.search.min", "2")
+ .put("threadpool.search.size", "15")
+ .build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(2));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(2));
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
+ // Make sure keep alive value changed
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
+ assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
+
+ threadPool.shutdown();
+ }
+
+ @Test(timeout = 10000)
+ public void testShutdownNowDoesntBlock() throws Exception {
+ ThreadPool threadPool = new ThreadPool(ImmutableSettings.settingsBuilder().put("threadpool.search.type", "cached").build(), null);
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.executor(Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Thread.sleep(20000);
+ } catch (InterruptedException ex) {
+ latch.countDown();
+ Thread.currentThread().interrupt();
+ }
+ }
+ });
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "fixed").build());
+ assertThat(threadPool.executor(Names.SEARCH), not(sameInstance(oldExecutor)));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isShutdown(), equalTo(true));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isTerminating(), equalTo(true));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isTerminated(), equalTo(false));
+ threadPool.shutdownNow();
+ latch.await();
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/timestamp/SimpleTimestampTests.java b/src/test/java/org/elasticsearch/timestamp/SimpleTimestampTests.java
new file mode 100644
index 0000000..03cce04
--- /dev/null
+++ b/src/test/java/org/elasticsearch/timestamp/SimpleTimestampTests.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.timestamp;
+
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for the _timestamp field: automatically generated timestamps
+ * as well as custom numeric and date-string values.
+ */
+public class SimpleTimestampTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleTimestamp() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_timestamp").field("enabled", true).field("store", "yes").endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> check with automatic timestamp");
+ long now1 = System.currentTimeMillis();
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+ long now2 = System.currentTimeMillis();
+
+ // we check both realtime get and non realtime get
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(true).execute().actionGet();
+ long timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, greaterThanOrEqualTo(now1));
+ assertThat(timestamp, lessThanOrEqualTo(now2));
+ // verify it's the same timestamp when going to the replica
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(true).execute().actionGet();
+ assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
+
+ // non realtime get (stored)
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, greaterThanOrEqualTo(now1));
+ assertThat(timestamp, lessThanOrEqualTo(now2));
+ // verify it's the same timestamp when going to the replica
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
+
+ logger.info("--> check with custom timestamp (numeric)");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimestamp("10").setRefresh(true).execute().actionGet();
+
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, equalTo(10l));
+ // verify it's the same timestamp when going to the replica
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
+
+ logger.info("--> check with custom timestamp (string)");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimestamp("1970-01-01T00:00:00.020").setRefresh(true).execute().actionGet();
+
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, equalTo(20l));
+ // verify it's the same timestamp when going to the replica
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java b/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java
new file mode 100644
index 0000000..ee4dc16
--- /dev/null
+++ b/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java
@@ -0,0 +1,875 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.transport.TransportRequestOptions.options;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Base class for transport implementation tests. Two transport services with
+ * different wire versions are wired together and exercised for request/response
+ * round trips, compression, error handling, timeouts and cross-version
+ * serialization.
+ */
+public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase {
+
+ protected ThreadPool threadPool;
+
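+ // Two artificial wire versions (ids 99 and 199) so the tests below can exercise
+ // serialization between nodes on different versions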
+ protected static final Version version0 = Version.fromId(/*0*/99);
+ protected DiscoveryNode nodeA;
+ protected TransportService serviceA;
+
+ protected static final Version version1 = Version.fromId(199);
+ protected DiscoveryNode nodeB;
+ protected TransportService serviceB;
+
+ protected abstract TransportService build(Settings settings, Version version);
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ threadPool = new ThreadPool();
+ serviceA = build(ImmutableSettings.builder().put("name", "TS_A").build(), version0);
+ nodeA = new DiscoveryNode("TS_A", "TS_A", serviceA.boundAddress().publishAddress(), ImmutableMap.<String, String>of(), version0);
+ serviceB = build(ImmutableSettings.builder().put("name", "TS_B").build(), version1);
+ nodeB = new DiscoveryNode("TS_B", "TS_B", serviceB.boundAddress().publishAddress(), ImmutableMap.<String, String>of(), version1);
+
+ // wait until all nodes are properly connected and the event has been sent, so
+ // tests in this class will not see this callback fire for the connections made
+ // during setup; the latch counts one onNodeConnected per connectToNode call below
+ final CountDownLatch latch = new CountDownLatch(4);
+ TransportConnectionListener waitForConnection = new TransportConnectionListener() {
+ @Override
+ public void onNodeConnected(DiscoveryNode node) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onNodeDisconnected(DiscoveryNode node) {
+ fail("disconnect should not be called " + node);
+ }
+ };
+ serviceA.addConnectionListener(waitForConnection);
+ serviceB.addConnectionListener(waitForConnection);
+
+ serviceA.connectToNode(nodeB);
+ serviceA.connectToNode(nodeA);
+ serviceB.connectToNode(nodeA);
+ serviceB.connectToNode(nodeB);
+
+ assertThat("failed to wait for all nodes to connect", latch.await(5, TimeUnit.SECONDS), equalTo(true));
+ serviceA.removeConnectionListener(waitForConnection);
+ serviceB.removeConnectionListener(waitForConnection);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ serviceA.close();
+ serviceB.close();
+ threadPool.shutdown();
+ }
+
+ @Test
+ public void testHelloWorld() {
+ serviceA.registerHandler("sayHello", new BaseTransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public StringMessageRequest newInstance() {
+ return new StringMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
+ assertThat("moshe", equalTo(request.message));
+ try {
+ channel.sendResponse(new StringMessageResponse("hello " + request.message));
+ } catch (IOException e) {
+ e.printStackTrace();
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHello",
+ new StringMessageRequest("moshe"), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("hello moshe", equalTo(response.message));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.get();
+ assertThat("hello moshe", equalTo(message.message));
+ } catch (Exception e) {
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+
+ res = serviceB.submitRequest(nodeA, "sayHello",
+ new StringMessageRequest("moshe"), TransportRequestOptions.options().withCompress(true), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("hello moshe", equalTo(response.message));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.get();
+ assertThat("hello moshe", equalTo(message.message));
+ } catch (Exception e) {
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+
+ serviceA.removeHandler("sayHello");
+ }
+
+ @Test
+ public void testVoidMessageCompressed() {
+ serviceA.registerHandler("sayHello", new BaseTransportRequestHandler<TransportRequest.Empty>() {
+ @Override
+ public TransportRequest.Empty newInstance() {
+ return TransportRequest.Empty.INSTANCE;
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(TransportRequest.Empty request, TransportChannel channel) {
+ try {
+ channel.sendResponse(TransportResponse.Empty.INSTANCE, TransportResponseOptions.options().withCompress(true));
+ } catch (IOException e) {
+ e.printStackTrace();
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+ }
+ });
+
+ TransportFuture<TransportResponse.Empty> res = serviceB.submitRequest(nodeA, "sayHello",
+ TransportRequest.Empty.INSTANCE, TransportRequestOptions.options().withCompress(true), new BaseTransportResponseHandler<TransportResponse.Empty>() {
+ @Override
+ public TransportResponse.Empty newInstance() {
+ return TransportResponse.Empty.INSTANCE;
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(TransportResponse.Empty response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true));
+ }
+ });
+
+ try {
+ TransportResponse.Empty message = res.get();
+ assertThat(message, notNullValue());
+ } catch (Exception e) {
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+
+ serviceA.removeHandler("sayHello");
+ }
+
+ @Test
+ public void testHelloWorldCompressed() {
+ serviceA.registerHandler("sayHello", new BaseTransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public StringMessageRequest newInstance() {
+ return new StringMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
+ assertThat("moshe", equalTo(request.message));
+ try {
+ channel.sendResponse(new StringMessageResponse("hello " + request.message), TransportResponseOptions.options().withCompress(true));
+ } catch (IOException e) {
+ e.printStackTrace();
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHello",
+ new StringMessageRequest("moshe"), TransportRequestOptions.options().withCompress(true), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("hello moshe", equalTo(response.message));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.get();
+ assertThat("hello moshe", equalTo(message.message));
+ } catch (Exception e) {
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+
+ serviceA.removeHandler("sayHello");
+ }
+
+ @Test
+ public void testErrorMessage() {
+ serviceA.registerHandler("sayHelloException", new BaseTransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public StringMessageRequest newInstance() {
+ return new StringMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception {
+ assertThat("moshe", equalTo(request.message));
+ throw new RuntimeException("bad message !!!");
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHelloException",
+ new StringMessageRequest("moshe"), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("got response instead of exception", false, equalTo(true));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ assertThat("bad message !!!", equalTo(exp.getCause().getMessage()));
+ }
+ });
+
+ try {
+ res.txGet();
+ assertThat("exception should be thrown", false, equalTo(true));
+ } catch (Exception e) {
+ assertThat("bad message !!!", equalTo(e.getCause().getMessage()));
+ }
+
+ serviceA.removeHandler("sayHelloException");
+ }
+
+ @Test
+ public void testDisconnectListener() throws Exception {
+ final CountDownLatch latch = new CountDownLatch(1);
+ TransportConnectionListener disconnectListener = new TransportConnectionListener() {
+ @Override
+ public void onNodeConnected(DiscoveryNode node) {
+ fail("node connected should not be called, all connection have been done previously, node: " + node);
+ }
+
+ @Override
+ public void onNodeDisconnected(DiscoveryNode node) {
+ latch.countDown();
+ }
+ };
+ serviceA.addConnectionListener(disconnectListener);
+ serviceB.close();
+ assertThat(latch.await(5, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ @Test
+ public void testTimeoutSendExceptionWithNeverSendingBackResponse() throws Exception {
+ serviceA.registerHandler("sayHelloTimeoutNoResponse", new BaseTransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public StringMessageRequest newInstance() {
+ return new StringMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
+ assertThat("moshe", equalTo(request.message));
+ // don't send back a response
+// try {
+// channel.sendResponse(new StringMessage("hello " + request.message));
+// } catch (IOException e) {
+// e.printStackTrace();
+// assertThat(e.getMessage(), false, equalTo(true));
+// }
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHelloTimeoutNoResponse",
+ new StringMessageRequest("moshe"), options().withTimeout(100), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("got response instead of exception", false, equalTo(true));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ assertThat(exp, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.txGet();
+ assertThat("exception should be thrown", false, equalTo(true));
+ } catch (Exception e) {
+ assertThat(e, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+
+ serviceA.removeHandler("sayHelloTimeoutNoResponse");
+ }
+
+ @Test
+ @TestLogging("_root:TRACE")
+ public void testTimeoutSendExceptionWithDelayedResponse() throws Exception {
+ serviceA.registerHandler("sayHelloTimeoutDelayedResponse", new BaseTransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public StringMessageRequest newInstance() {
+ return new StringMessageRequest();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
+ TimeValue sleep = TimeValue.parseTimeValue(request.message, null);
+ try {
+ Thread.sleep(sleep.millis());
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ try {
+ channel.sendResponse(new StringMessageResponse("hello " + request.message));
+ } catch (IOException e) {
+ e.printStackTrace();
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHelloTimeoutDelayedResponse",
+ new StringMessageRequest("300ms"), options().withTimeout(100), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("got response instead of exception", false, equalTo(true));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ assertThat(exp, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.txGet();
+ assertThat("exception should be thrown", false, equalTo(true));
+ } catch (Exception e) {
+ assertThat(e, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+
+ // sleep for 400 millis to make sure we get back the response
+ Thread.sleep(400);
+
+ for (int i = 0; i < 10; i++) {
+ final int counter = i;
+ // now, try to send another request, this time with a response delay short enough to beat the timeout
+ res = serviceB.submitRequest(nodeA, "sayHelloTimeoutDelayedResponse",
+ new StringMessageRequest(counter + "ms"), options().withTimeout(100), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("hello " + counter + "ms", equalTo(response.message));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response for " + counter + ": " + exp.getDetailedMessage(), false, equalTo(true));
+ }
+ });
+
+ StringMessageResponse message = res.txGet();
+ assertThat(message.message, equalTo("hello " + counter + "ms"));
+ }
+
+ serviceA.removeHandler("sayHelloTimeoutDelayedResponse");
+ }
+
+ static class StringMessageRequest extends TransportRequest {
+
+ private String message;
+
+ StringMessageRequest(String message) {
+ this.message = message;
+ }
+
+ StringMessageRequest() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ message = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(message);
+ }
+ }
+
+ static class StringMessageResponse extends TransportResponse {
+
+ private String message;
+
+ StringMessageResponse(String message) {
+ this.message = message;
+ }
+
+ StringMessageResponse() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ message = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(message);
+ }
+ }
+
+
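+ // The Version0/Version1 request and response pairs below demonstrate the wire
+ // versioning pattern: fields added in version1 are only read or written when the
+ // stream's version is onOrAfter(version1), so version0 nodes never see them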
+ static class Version0Request extends TransportRequest {
+
+ int value1;
+
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ value1 = in.readInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeInt(value1);
+ }
+ }
+
+ static class Version1Request extends Version0Request {
+
+ int value2;
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ if (in.getVersion().onOrAfter(version1)) {
+ value2 = in.readInt();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ if (out.getVersion().onOrAfter(version1)) {
+ out.writeInt(value2);
+ }
+ }
+ }
+
+ static class Version0Response extends TransportResponse {
+
+ int value1;
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ value1 = in.readInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeInt(value1);
+ }
+ }
+
+ static class Version1Response extends Version0Response {
+
+ int value2;
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ if (in.getVersion().onOrAfter(version1)) {
+ value2 = in.readInt();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ if (out.getVersion().onOrAfter(version1)) {
+ out.writeInt(value2);
+ }
+ }
+ }
+
+ @Test
+ public void testVersion_from0to1() throws Exception {
+ serviceB.registerHandler("/version", new BaseTransportRequestHandler<Version1Request>() {
+ @Override
+ public Version1Request newInstance() {
+ return new Version1Request();
+ }
+
+ @Override
+ public void messageReceived(Version1Request request, TransportChannel channel) throws Exception {
+ assertThat(request.value1, equalTo(1));
+ assertThat(request.value2, equalTo(0)); // not set, coming from service A
+ Version1Response response = new Version1Response();
+ response.value1 = 1;
+ response.value2 = 2;
+ channel.sendResponse(response);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+
+ Version0Request version0Request = new Version0Request();
+ version0Request.value1 = 1;
+ Version0Response version0Response = serviceA.submitRequest(nodeB, "/version", version0Request, new BaseTransportResponseHandler<Version0Response>() {
+ @Override
+ public Version0Response newInstance() {
+ return new Version0Response();
+ }
+
+ @Override
+ public void handleResponse(Version0Response response) {
+ assertThat(response.value1, equalTo(1));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ fail();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }).txGet();
+
+ assertThat(version0Response.value1, equalTo(1));
+ }
+
+ @Test
+ public void testVersion_from1to0() throws Exception {
+ serviceA.registerHandler("/version", new BaseTransportRequestHandler<Version0Request>() {
+ @Override
+ public Version0Request newInstance() {
+ return new Version0Request();
+ }
+
+ @Override
+ public void messageReceived(Version0Request request, TransportChannel channel) throws Exception {
+ assertThat(request.value1, equalTo(1));
+ Version0Response response = new Version0Response();
+ response.value1 = 1;
+ channel.sendResponse(response);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+
+ Version1Request version1Request = new Version1Request();
+ version1Request.value1 = 1;
+ version1Request.value2 = 2;
+ Version1Response version1Response = serviceB.submitRequest(nodeA, "/version", version1Request, new BaseTransportResponseHandler<Version1Response>() {
+ @Override
+ public Version1Response newInstance() {
+ return new Version1Response();
+ }
+
+ @Override
+ public void handleResponse(Version1Response response) {
+ assertThat(response.value1, equalTo(1));
+ assertThat(response.value2, equalTo(0)); // default value, because the response was serialized by a version 0 node
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ fail();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }).txGet();
+
+ assertThat(version1Response.value1, equalTo(1));
+ assertThat(version1Response.value2, equalTo(0));
+ }
+
+ @Test
+ public void testVersion_from1to1() throws Exception {
+ serviceB.registerHandler("/version", new BaseTransportRequestHandler<Version1Request>() {
+ @Override
+ public Version1Request newInstance() {
+ return new Version1Request();
+ }
+
+ @Override
+ public void messageReceived(Version1Request request, TransportChannel channel) throws Exception {
+ assertThat(request.value1, equalTo(1));
+ assertThat(request.value2, equalTo(2));
+ Version1Response response = new Version1Response();
+ response.value1 = 1;
+ response.value2 = 2;
+ channel.sendResponse(response);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+
+ Version1Request version1Request = new Version1Request();
+ version1Request.value1 = 1;
+ version1Request.value2 = 2;
+ Version1Response version1Response = serviceB.submitRequest(nodeB, "/version", version1Request, new BaseTransportResponseHandler<Version1Response>() {
+ @Override
+ public Version1Response newInstance() {
+ return new Version1Response();
+ }
+
+ @Override
+ public void handleResponse(Version1Response response) {
+ assertThat(response.value1, equalTo(1));
+ assertThat(response.value2, equalTo(2));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ fail();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }).txGet();
+
+ assertThat(version1Response.value1, equalTo(1));
+ assertThat(version1Response.value2, equalTo(2));
+ }
+
+ @Test
+ public void testVersion_from0to0() throws Exception {
+ serviceA.registerHandler("/version", new BaseTransportRequestHandler<Version0Request>() {
+ @Override
+ public Version0Request newInstance() {
+ return new Version0Request();
+ }
+
+ @Override
+ public void messageReceived(Version0Request request, TransportChannel channel) throws Exception {
+ assertThat(request.value1, equalTo(1));
+ Version0Response response = new Version0Response();
+ response.value1 = 1;
+ channel.sendResponse(response);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+
+ Version0Request version0Request = new Version0Request();
+ version0Request.value1 = 1;
+ Version0Response version0Response = serviceA.submitRequest(nodeA, "/version", version0Request, new BaseTransportResponseHandler<Version0Response>() {
+ @Override
+ public Version0Response newInstance() {
+ return new Version0Response();
+ }
+
+ @Override
+ public void handleResponse(Version0Response response) {
+ assertThat(response.value1, equalTo(1));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ fail();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }).txGet();
+
+ assertThat(version0Response.value1, equalTo(1));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java b/src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java
new file mode 100644
index 0000000..2b6b42e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.local;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.transport.AbstractSimpleTransportTests;
+import org.elasticsearch.transport.TransportService;
+
+public class SimpleLocalTransportTests extends AbstractSimpleTransportTests {
+
+ @Override
+ protected TransportService build(Settings settings, Version version) {
+ return new TransportService(new LocalTransport(settings, threadPool, version), threadPool).start();
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java b/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java
new file mode 100644
index 0000000..28c0fda
--- /dev/null
+++ b/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.ElasticsearchIllegalStateException;
+import org.elasticsearch.common.util.concurrent.KeyedLock;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+
+public class KeyedLockTests extends ElasticsearchTestCase {
+
+ @Test
+ public void checkIfMapEmptyAfterLotsOfAcquireAndReleases() throws InterruptedException {
+ ConcurrentHashMap<String, Integer> counter = new ConcurrentHashMap<String, Integer>();
+ ConcurrentHashMap<String, AtomicInteger> safeCounter = new ConcurrentHashMap<String, AtomicInteger>();
+ KeyedLock<String> connectionLock = new KeyedLock<String>();
+ String[] names = new String[randomIntBetween(1, 40)];
+ for (int i = 0; i < names.length; i++) {
+ names[i] = randomRealisticUnicodeOfLengthBetween(10, 20);
+ }
+ CountDownLatch startLatch = new CountDownLatch(1);
+ int numThreads = randomIntBetween(3, 10);
+ Thread[] threads = new Thread[numThreads];
+ for (int i = 0; i < numThreads; i++) {
+ threads[i] = new AcquireAndReleaseThread(startLatch, connectionLock, names, counter, safeCounter);
+ }
+ for (int i = 0; i < numThreads; i++) {
+ threads[i].start();
+ }
+ startLatch.countDown();
+ for (int i = 0; i < numThreads; i++) {
+ threads[i].join();
+ }
+ assertThat(connectionLock.hasLockedKeys(), equalTo(false));
+
+ Set<Entry<String, Integer>> entrySet = counter.entrySet();
+ assertThat(counter.size(), equalTo(safeCounter.size()));
+ for (Entry<String, Integer> entry : entrySet) {
+ AtomicInteger atomicInteger = safeCounter.get(entry.getKey());
+ assertThat(atomicInteger, not(Matchers.nullValue()));
+ assertThat(atomicInteger.get(), equalTo(entry.getValue()));
+ }
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void checkCannotAcquireTwoLocks() throws InterruptedException {
+ KeyedLock<String> connectionLock = new KeyedLock<String>();
+ String name = randomRealisticUnicodeOfLength(atLeast(10));
+ connectionLock.acquire(name);
+ connectionLock.acquire(name);
+ }
+
+ @Test(expected = ElasticsearchIllegalStateException.class)
+ public void checkCannotReleaseUnacquiredLock() throws InterruptedException {
+ KeyedLock<String> connectionLock = new KeyedLock<String>();
+ String name = randomRealisticUnicodeOfLength(atLeast(10));
+ connectionLock.release(name);
+ }
+
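+ // Each thread increments counters for random keys. The plain Integer map is only
+ // consistent if the per-key lock provides mutual exclusion; comparing it with the
+ // AtomicInteger-based safeCounter therefore verifies the lock implementation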
+ public static class AcquireAndReleaseThread extends Thread {
+ private CountDownLatch startLatch;
+ KeyedLock<String> connectionLock;
+ String[] names;
+ ConcurrentHashMap<String, Integer> counter;
+ ConcurrentHashMap<String, AtomicInteger> safeCounter;
+
+ public AcquireAndReleaseThread(CountDownLatch startLatch, KeyedLock<String> connectionLock, String[] names,
+ ConcurrentHashMap<String, Integer> counter, ConcurrentHashMap<String, AtomicInteger> safeCounter) {
+ this.startLatch = startLatch;
+ this.connectionLock = connectionLock;
+ this.names = names;
+ this.counter = counter;
+ this.safeCounter = safeCounter;
+ }
+
+ public void run() {
+ try {
+ startLatch.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException();
+ }
+ int numRuns = atLeast(500);
+ for (int i = 0; i < numRuns; i++) {
+ String curName = names[randomInt(names.length - 1)];
+ connectionLock.acquire(curName);
+ try {
+ Integer integer = counter.get(curName);
+ if (integer == null) {
+ counter.put(curName, 1);
+ } else {
+ counter.put(curName, integer.intValue() + 1);
+ }
+ } finally {
+ connectionLock.release(curName);
+ }
+ AtomicInteger atomicInteger = new AtomicInteger(0);
+ AtomicInteger value = safeCounter.putIfAbsent(curName, atomicInteger);
+ if (value == null) {
+ atomicInteger.incrementAndGet();
+ } else {
+ value.incrementAndGet();
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java b/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java
new file mode 100644
index 0000000..dbd3bd8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.transport.AbstractSimpleTransportTests;
+import org.elasticsearch.transport.ConnectTransportException;
+import org.elasticsearch.transport.TransportService;
+import org.junit.Test;
+
+public class SimpleNettyTransportTests extends AbstractSimpleTransportTests {
+
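+ // Bind to a random port range so concurrently running tests don't collide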
+ @Override
+ protected TransportService build(Settings settings, Version version) {
+ int startPort = 11000 + randomIntBetween(0, 255);
+ int endPort = startPort + 10;
+ settings = ImmutableSettings.builder().put(settings).put("transport.tcp.port", startPort + "-" + endPort).build();
+ return new TransportService(settings, new NettyTransport(settings, threadPool, new NetworkService(settings), version), threadPool).start();
+ }
+
+ @Test
+ public void testConnectException() {
+ try {
+ serviceA.connectToNode(new DiscoveryNode("C", new InetSocketTransportAddress("localhost", 9876), Version.CURRENT));
+ fail();
+ } catch (ConnectTransportException e) {
+// e.printStackTrace();
+ // all is well
+ }
+ }
+} \ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/tribe/TribeTests.java b/src/test/java/org/elasticsearch/tribe/TribeTests.java
new file mode 100644
index 0000000..a1c3c44
--- /dev/null
+++ b/src/test/java/org/elasticsearch/tribe/TribeTests.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.tribe;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.MasterNotDiscoveredException;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.TestCluster;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Note: when talking to a tribe node client there is no need to set the local
+ * flag on master read operations; the tribe client sets it by default.
+ */
+public class TribeTests extends ElasticsearchIntegrationTest {
+
+ private TestCluster cluster2;
+ private Node tribeNode;
+ private Client tribeClient;
+
+ @Before
+ public void setupSecondCluster() {
+ // create another cluster
+ cluster2 = new TestCluster(randomLong(), 2, 2, cluster().getClusterName() + "-2");
+ cluster2.beforeTest(getRandom(), getPerTestTransportClientRatio());
+ cluster2.ensureAtLeastNumNodes(2);
+
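+ // Configure a tribe node that joins both clusters (t1 is the suite's cluster,
+ // t2 is the one created above) with read/write blocks disabled so documents can
+ // be indexed through it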
+ Settings settings = ImmutableSettings.builder()
+ .put("tribe.t1.cluster.name", cluster().getClusterName())
+ .put("tribe.t2.cluster.name", cluster2.getClusterName())
+ .put("tribe.blocks.write", false)
+ .put("tribe.blocks.read", false)
+ .build();
+
+ tribeNode = NodeBuilder.nodeBuilder()
+ .settings(settings)
+ .node();
+ tribeClient = tribeNode.client();
+ }
+
+ @After
+ public void tearDownSecondCluster() {
+ tribeNode.close();
+ cluster2.afterTest();
+ cluster2.close();
+ }
+
+ @Test
+ public void testTribeOnOneCluster() throws Exception {
+ logger.info("create 2 indices, test1 on t1, and test2 on t2");
+ cluster().client().admin().indices().prepareCreate("test1").get();
+ cluster2.client().admin().indices().prepareCreate("test2").get();
+
+
+ // wait until the tribe node has connected to both clusters, by checking that the indices exist in its cluster state
+ logger.info("wait till test1 and test2 exists in the tribe node state");
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
+ return tribeState.getMetaData().hasIndex("test1") && tribeState.getMetaData().hasIndex("test2") &&
+ tribeState.getRoutingTable().hasIndex("test1") && tribeState.getRoutingTable().hasIndex("test2");
+ }
+ });
+
+ logger.info("wait till tribe has the same nodes as the 2 clusters");
+ awaitSameNodeCounts();
+
+ assertThat(tribeClient.admin().cluster().prepareHealth().setWaitForGreenStatus().get().getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("create 2 docs through the tribe node");
+ tribeClient.prepareIndex("test1", "type1", "1").setSource("field1", "value1").get();
+ tribeClient.prepareIndex("test2", "type1", "1").setSource("field1", "value1").get();
+ tribeClient.admin().indices().prepareRefresh().get();
+
+ logger.info("verify they are there");
+ assertHitCount(tribeClient.prepareCount().get(), 2l);
+ assertHitCount(tribeClient.prepareSearch().get(), 2l);
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
+ return tribeState.getMetaData().index("test1").mapping("type1") != null &&
+ tribeState.getMetaData().index("test2").mapping("type2") != null;
+ }
+ });
+
+
+ logger.info("write to another type");
+ tribeClient.prepareIndex("test1", "type2", "1").setSource("field1", "value1").get();
+ tribeClient.prepareIndex("test2", "type2", "1").setSource("field1", "value1").get();
+ tribeClient.admin().indices().prepareRefresh().get();
+
+ logger.info("verify they are there");
+        assertHitCount(tribeClient.prepareCount().get(), 4L);
+        assertHitCount(tribeClient.prepareSearch().get(), 4L);
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
+ return tribeState.getMetaData().index("test1").mapping("type1") != null && tribeState.getMetaData().index("test1").mapping("type2") != null &&
+ tribeState.getMetaData().index("test2").mapping("type1") != null && tribeState.getMetaData().index("test2").mapping("type2") != null;
+ }
+ });
+
+ logger.info("make sure master level write operations fail... (we don't really have a master)");
+ try {
+ tribeClient.admin().indices().prepareCreate("tribe_index").setMasterNodeTimeout("10ms").get();
+ fail();
+ } catch (MasterNotDiscoveredException e) {
+ // all is well!
+ }
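+        // the tribe node merges the cluster states of t1 and t2 but never elects a master of its own,
+        // so cluster-level metadata changes have to go through one of the underlying clusters instead,
+        // e.g. cluster().client().admin().indices().prepareCreate("tribe_index").get()
+        // (illustrative call, not executed here)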
+
+ logger.info("delete an index, and make sure its reflected");
+ cluster2.client().admin().indices().prepareDelete("test2").get();
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
+ return tribeState.getMetaData().hasIndex("test1") && !tribeState.getMetaData().hasIndex("test2") &&
+ tribeState.getRoutingTable().hasIndex("test1") && !tribeState.getRoutingTable().hasIndex("test2");
+ }
+ });
+
+ logger.info("stop a node, make sure its reflected");
+ cluster2.stopRandomNode();
+ awaitSameNodeCounts();
+ }
+
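+    // the tribe node tags every node it pulls in with a "tribe.name" attribute; comparing the
+    // per-tribe data node counts against each cluster's own count verifies that the tribe sees
+    // exactly the nodes of both clusters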
+ private void awaitSameNodeCounts() throws Exception {
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ DiscoveryNodes tribeNodes = tribeNode.client().admin().cluster().prepareState().get().getState().getNodes();
+ return countDataNodesForTribe("t1", tribeNodes) == cluster().client().admin().cluster().prepareState().get().getState().getNodes().dataNodes().size()
+ && countDataNodesForTribe("t2", tribeNodes) == cluster2.client().admin().cluster().prepareState().get().getState().getNodes().dataNodes().size();
+ }
+ });
+ }
+
+ private int countDataNodesForTribe(String tribeName, DiscoveryNodes nodes) {
+ int count = 0;
+ for (DiscoveryNode node : nodes) {
+ if (!node.dataNode()) {
+ continue;
+ }
+ if (tribeName.equals(node.getAttributes().get(TribeService.TRIBE_NAME))) {
+ count++;
+ }
+ }
+ return count;
+ }
+}
diff --git a/src/test/java/org/elasticsearch/ttl/SimpleTTLTests.java b/src/test/java/org/elasticsearch/ttl/SimpleTTLTests.java
new file mode 100644
index 0000000..40ac68b
--- /dev/null
+++ b/src/test/java/org/elasticsearch/ttl/SimpleTTLTests.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ttl;
+
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(scope=Scope.TEST)
+public class SimpleTTLTests extends ElasticsearchIntegrationTest {
+
+    private static final long PURGE_INTERVAL = 200;
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("indices.ttl.interval", PURGE_INTERVAL)
+ .put("index.number_of_shards", 2) // 2 shards to test TTL purge with routing properly
+ .put("cluster.routing.operation.use_type", false) // make sure we control the shard computation
+ .put("cluster.routing.operation.hash.type", "djb")
+ .build();
+ }
+
+ @Test
+ public void testSimpleTTL() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .startObject("_ttl").field("enabled", true).field("store", "yes").endObject()
+ .endObject()
+ .endObject())
+ .addMapping("type2", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type2")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .startObject("_ttl").field("enabled", true).field("store", "yes").field("default", "1d").endObject()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ long providedTTLValue = 3000;
+ logger.info("--> checking ttl");
+        // Index one doc without routing, one doc with routing, one doc with no TTL and no default, and one doc with the default TTL
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTTL(providedTTLValue).setRefresh(true).execute().actionGet();
+ long now = System.currentTimeMillis();
+ client().prepareIndex("test", "type1", "with_routing").setSource("field1", "value1").setTTL(providedTTLValue).setRouting("routing").setRefresh(true).execute().actionGet();
+ client().prepareIndex("test", "type1", "no_ttl").setSource("field1", "value1").execute().actionGet();
+ client().prepareIndex("test", "type2", "default_ttl").setSource("field1", "value1").execute().actionGet();
+
+ // realtime get check
+ long currentTime = System.currentTimeMillis();
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(true).execute().actionGet();
+ long ttl0;
+ if (getResponse.isExists()) {
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, greaterThan(-PURGE_INTERVAL));
+ assertThat(ttl0, lessThan(providedTTLValue - (currentTime - now)));
+ } else {
+            assertThat(providedTTLValue - (currentTime - now), lessThan(0L));
+ }
+ // verify the ttl is still decreasing when going to the replica
+ currentTime = System.currentTimeMillis();
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(true).execute().actionGet();
+ if (getResponse.isExists()) {
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, greaterThan(-PURGE_INTERVAL));
+ assertThat(ttl0, lessThan(providedTTLValue - (currentTime - now)));
+ } else {
+            assertThat(providedTTLValue - (currentTime - now), lessThan(0L));
+ }
+ // non realtime get (stored)
+ currentTime = System.currentTimeMillis();
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).execute().actionGet();
+ if (getResponse.isExists()) {
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, greaterThan(-PURGE_INTERVAL));
+ assertThat(ttl0, lessThan(providedTTLValue - (currentTime - now)));
+ } else {
+            assertThat(providedTTLValue - (currentTime - now), lessThan(0L));
+ }
+        // non realtime get going to the replica
+ currentTime = System.currentTimeMillis();
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).execute().actionGet();
+ if (getResponse.isExists()) {
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, greaterThan(-PURGE_INTERVAL));
+ assertThat(ttl0, lessThan(providedTTLValue - (currentTime - now)));
+ } else {
+            assertThat(providedTTLValue - (currentTime - now), lessThan(0L));
+ }
+
+ // no TTL provided so no TTL fetched
+ getResponse = client().prepareGet("test", "type1", "no_ttl").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.getField("_ttl"), nullValue());
+ // no TTL provided make sure it has default TTL
+ getResponse = client().prepareGet("test", "type2", "default_ttl").setFields("_ttl").setRealtime(true).execute().actionGet();
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, greaterThan(0L));
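+        // type2's mapping sets a default _ttl of "1d", so ttl0 starts near 86400000 ms (24h) and
+        // counts down from the moment the doc was indexed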
+
+ // make sure the purger has done its job for all indexed docs that are expired
+ long shouldBeExpiredDate = now + providedTTLValue + PURGE_INTERVAL + 2000;
+ currentTime = System.currentTimeMillis();
+ if (shouldBeExpiredDate - currentTime > 0) {
+ Thread.sleep(shouldBeExpiredDate - currentTime);
+ }
+
+        // We can't assume that after waiting for ttl + purgeInterval (waitTime) the documents have actually been deleted.
+        // The ttl purging happens in the background, in a different thread, and might not have completed after waiting for waitTime.
+        // But we can use the index statistics' delete count to be sure that the deletes have been executed, since it is
+        // incremented before ttl purging finishes.
+ logger.info("--> checking purger");
+ long currentDeleteCount;
+ do {
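+            // occasionally force a full flush or an optimize down to one segment so that purging
+            // is also exercised while segments are being rewritten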
+ if (rarely()) {
+ client().admin().indices().prepareFlush("test").setFull(true).execute().actionGet();
+ } else if (rarely()) {
+ client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).execute().actionGet();
+ }
+ IndicesStatsResponse response = client().admin().indices().prepareStats("test")
+ .clear().setIndexing(true)
+ .execute().actionGet();
+ currentDeleteCount = response.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount();
+ } while (currentDeleteCount < 4); // TTL deletes two docs, but it is indexed in the primary shard and replica shard.
+ assertThat(currentDeleteCount, equalTo(4l));
+
+ // realtime get check
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ // replica realtime get check
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+
+ // Need to run a refresh, in order for the non realtime get to work.
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ // non realtime get (stored) check
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(false).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+        // non realtime get going to the replica check
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(false).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java b/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java
new file mode 100644
index 0000000..47827d7
--- /dev/null
+++ b/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.update;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.script.AbstractExecutableScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.hasKey;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope=Scope.SUITE, numNodes=1)
+public class UpdateByNativeScriptTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.settingsBuilder()
+ .put("script.native.custom.type", CustomNativeScriptFactory.class.getName())
+ .put(super.nodeSettings(nodeOrdinal))
+ .build();
+ }
+
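+    // registering CustomNativeScriptFactory under "script.native.custom.type" is what makes the
+    // update below resolvable via setScript("custom") together with setScriptLang("native")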
+ @Test
+ public void testThatUpdateUsingNativeScriptWorks() throws Exception {
+ prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build()).get();
+ ensureGreen();
+
+ index("test", "type", "1", "text", "value");
+
+ Map<String, Object> params = Maps.newHashMap();
+ params.put("foo", "SETVALUE");
+ client().prepareUpdate("test", "type", "1").setScript("custom").setScriptLang("native").setScriptParams(params).get();
+
+ Map<String, Object> data = client().prepareGet("test", "type", "1").get().getSource();
+ assertThat(data, hasKey("foo"));
+ assertThat(data.get("foo").toString(), is("SETVALUE"));
+ }
+
+ static class CustomNativeScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new CustomScript(params);
+ }
+ }
+
+ static class CustomScript extends AbstractExecutableScript {
+ private Map<String, Object> params;
+ private Map<String, Object> vars = Maps.newHashMapWithExpectedSize(2);
+
+ public CustomScript(Map<String, Object> params) {
+ this.params = params;
+ }
+
+ @Override
+ public Object run() {
+ if (vars.containsKey("ctx") && vars.get("ctx") instanceof Map) {
+ Map ctx = (Map) vars.get("ctx");
+ if (ctx.containsKey("_source") && ctx.get("_source") instanceof Map) {
+ Map source = (Map) ctx.get("_source");
+ source.putAll(params);
+ }
+ }
+            // the return value does not matter; the update is applied from the mutated ctx._source, not from this result
+ return null;
+ }
+
+ @Override
+ public void setNextVar(String name, Object value) {
+ vars.put(name, value);
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/update/UpdateTests.java b/src/test/java/org/elasticsearch/update/UpdateTests.java
new file mode 100644
index 0000000..2eb35f8
--- /dev/null
+++ b/src/test/java/org/elasticsearch/update/UpdateTests.java
@@ -0,0 +1,503 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.update;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.engine.DocumentMissingException;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.*;
+
+public class UpdateTests extends ElasticsearchIntegrationTest {
+
+ protected void createIndex() throws Exception {
+ logger.info("--> creating index test");
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .startObject("_ttl").field("enabled", true).field("store", "yes").endObject()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+ }
+
+ @Test
+ public void testUpdateRequest() throws Exception {
+ UpdateRequest request = new UpdateRequest("test", "type", "1");
+ // simple script
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .field("script", "script1")
+ .endObject());
+ assertThat(request.script(), equalTo("script1"));
+
+ // script with params
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .field("script", "script1")
+ .startObject("params").field("param1", "value1").endObject()
+ .endObject());
+ assertThat(request.script(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("params").field("param1", "value1").endObject()
+ .field("script", "script1")
+ .endObject());
+ assertThat(request.script(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+
+ // script with params and upsert
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("params").field("param1", "value1").endObject()
+ .field("script", "script1")
+ .startObject("upsert").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
+ .endObject());
+ assertThat(request.script(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+ Map<String, Object> upsertDoc = XContentHelper.convertToMap(request.upsertRequest().source(), true).v2();
+ assertThat(upsertDoc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2"));
+
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("upsert").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
+ .startObject("params").field("param1", "value1").endObject()
+ .field("script", "script1")
+ .endObject());
+ assertThat(request.script(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+ upsertDoc = XContentHelper.convertToMap(request.upsertRequest().source(), true).v2();
+ assertThat(upsertDoc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2"));
+
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("params").field("param1", "value1").endObject()
+ .startObject("upsert").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
+ .field("script", "script1")
+ .endObject());
+ assertThat(request.script(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+ upsertDoc = XContentHelper.convertToMap(request.upsertRequest().source(), true).v2();
+ assertThat(upsertDoc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2"));
+
+ // script with doc
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("doc").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
+ .endObject());
+ Map<String, Object> doc = request.doc().sourceAsMap();
+ assertThat(doc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) doc.get("compound")).get("field2").toString(), equalTo("value2"));
+ }
+
+ @Test
+ public void testUpsert() throws Exception {
+ createIndex();
+ ensureGreen();
+
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+ .setScript("ctx._source.field += 1")
+ .execute().actionGet();
+ assertTrue(updateResponse.isCreated());
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1"));
+ }
+
+ updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+ .setScript("ctx._source.field += 1")
+ .execute().actionGet();
+ assertFalse(updateResponse.isCreated());
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("2"));
+ }
+ }
+
+ @Test
+ public void testUpsertDoc() throws Exception {
+ createIndex();
+ ensureGreen();
+
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setDoc(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setDocAsUpsert(true)
+ .setFields("_source")
+ .execute().actionGet();
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+ }
+
+ @Test
+ // See: https://github.com/elasticsearch/elasticsearch/issues/3265
+ public void testNotUpsertDoc() throws Exception {
+ createIndex();
+ ensureGreen();
+
+ assertThrows(client().prepareUpdate("test", "type1", "1")
+ .setDoc(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setDocAsUpsert(false)
+ .setFields("_source")
+ .execute(), DocumentMissingException.class);
+ }
+
+ @Test
+ public void testUpsertFields() throws Exception {
+ createIndex();
+ ensureGreen();
+
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setScript("ctx._source.extra = \"foo\"")
+ .setFields("_source")
+ .execute().actionGet();
+
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("extra"), nullValue());
+
+ updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setScript("ctx._source.extra = \"foo\"")
+ .setFields("_source")
+ .execute().actionGet();
+
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("extra").toString(), equalTo("foo"));
+ }
+
+ @Test
+ public void testVersionedUpdate() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ index("test", "type", "1", "text", "value"); // version is now 1
+
+ assertThrows(client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v2'").setVersion(2).execute(),
+ VersionConflictEngineException.class);
+
+ client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v2'").setVersion(1).get();
+ assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(2l));
+
+ // and again with a higher version..
+ client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v3'").setVersion(2).get();
+
+ assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(3l));
+
+ // after delete
+ client().prepareDelete("test", "type", "1").get();
+ assertThrows(client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v2'").setVersion(3).execute(),
+ DocumentMissingException.class);
+
+ // external versioning
+ client().prepareIndex("test", "type", "2").setSource("text", "value").setVersion(10).setVersionType(VersionType.EXTERNAL).get();
+ assertThrows(client().prepareUpdate("test", "type", "2").setScript("ctx._source.text = 'v2'").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class);
+
+ client().prepareUpdate("test", "type", "2").setScript("ctx._source.text = 'v2'").setVersion(11).setVersionType(VersionType.EXTERNAL).get();
+
+ assertThat(client().prepareGet("test", "type", "2").get().getVersion(), equalTo(11l));
+
+        // upserts - the combination with versions is a bit weird. Tests are here to ensure we do not change our behavior unintentionally
+
+        // With internal versions, it means "if the object is there with version X, update it or explode; if it is not there, index it".
+ client().prepareUpdate("test", "type", "3").setScript("ctx._source.text = 'v2'").setVersion(10).setUpsert("{ \"text\": \"v0\" }").get();
+ GetResponse get = get("test", "type", "3");
+        assertThat(get.getVersion(), equalTo(1L));
+ assertThat((String) get.getSource().get("text"), equalTo("v0"));
+
+        // With external versions, it means "if the object is there with a version lower than X, update it or explode; if it is not there, insert with the new version".
+ client().prepareUpdate("test", "type", "4").setScript("ctx._source.text = 'v2'").
+ setVersion(10).setVersionType(VersionType.EXTERNAL).setUpsert("{ \"text\": \"v0\" }").get();
+ get = get("test", "type", "4");
+        assertThat(get.getVersion(), equalTo(10L));
+ assertThat((String) get.getSource().get("text"), equalTo("v0"));
+
+        // retry_on_conflict in combination with an explicit version is rejected:
+
+ assertThrows(client().prepareUpdate("test", "type", "1").setVersion(10).setRetryOnConflict(5), ActionRequestValidationException.class);
+
+ }
+
+ @Test
+ public void testIndexAutoCreation() throws Exception {
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setScript("ctx._source.extra = \"foo\"")
+ .setFields("_source")
+ .execute().actionGet();
+
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("extra"), nullValue());
+ }
+
+ @Test
+ public void testUpdate() throws Exception {
+ createIndex();
+ ensureGreen();
+
+ try {
+ client().prepareUpdate("test", "type1", "1").setScript("ctx._source.field++").execute().actionGet();
+ fail();
+ } catch (DocumentMissingException e) {
+ // all is well
+ }
+
+ client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
+
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1").setScript("ctx._source.field += 1").execute().actionGet();
+ assertThat(updateResponse.getVersion(), equalTo(2L));
+ assertFalse(updateResponse.isCreated());
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("2"));
+ }
+
+ updateResponse = client().prepareUpdate("test", "type1", "1").setScript("ctx._source.field += count").addScriptParam("count", 3).execute().actionGet();
+ assertThat(updateResponse.getVersion(), equalTo(3L));
+ assertFalse(updateResponse.isCreated());
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5"));
+ }
+
+ // check noop
+ updateResponse = client().prepareUpdate("test", "type1", "1").setScript("ctx.op = 'none'").execute().actionGet();
+ assertThat(updateResponse.getVersion(), equalTo(3L));
+ assertFalse(updateResponse.isCreated());
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5"));
+ }
+
+ // check delete
+ updateResponse = client().prepareUpdate("test", "type1", "1").setScript("ctx.op = 'delete'").execute().actionGet();
+ assertThat(updateResponse.getVersion(), equalTo(4L));
+ assertFalse(updateResponse.isCreated());
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ }
+
+ // check TTL is kept after an update without TTL
+ client().prepareIndex("test", "type1", "2").setSource("field", 1).setTTL(86400000L).setRefresh(true).execute().actionGet();
+ GetResponse getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+ long ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl, greaterThan(0L));
+ client().prepareUpdate("test", "type1", "2").setScript("ctx._source.field += 1").execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+ ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl, greaterThan(0L));
+
+ // check TTL update
+ client().prepareUpdate("test", "type1", "2").setScript("ctx._ttl = 3600000").execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+ ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl, greaterThan(0L));
+ assertThat(ttl, lessThanOrEqualTo(3600000L));
+
+ // check timestamp update
+ client().prepareIndex("test", "type1", "3").setSource("field", 1).setRefresh(true).execute().actionGet();
+ client().prepareUpdate("test", "type1", "3").setScript("ctx._timestamp = \"2009-11-15T14:12:12\"").execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "3").setFields("_timestamp").execute().actionGet();
+ long timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, equalTo(1258294332000L));
+
+ // check fields parameter
+ client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
+ updateResponse = client().prepareUpdate("test", "type1", "1").setScript("ctx._source.field += 1").setFields("_source", "field").execute().actionGet();
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().sourceRef(), notNullValue());
+ assertThat(updateResponse.getGetResult().field("field").getValue(), notNullValue());
+
+ // check updates without script
+ // add new field
+ client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
+ updateResponse = client().prepareUpdate("test", "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("field2", 2).endObject()).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1"));
+ assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2"));
+ }
+
+ // change existing field
+ updateResponse = client().prepareUpdate("test", "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("field", 3).endObject()).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("3"));
+ assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2"));
+ }
+
+ // recursive map
+ Map<String, Object> testMap = new HashMap<String, Object>();
+ Map<String, Object> testMap2 = new HashMap<String, Object>();
+ Map<String, Object> testMap3 = new HashMap<String, Object>();
+ testMap3.put("commonkey", testMap);
+ testMap3.put("map3", 5);
+ testMap2.put("map2", 6);
+ testMap.put("commonkey", testMap2);
+ testMap.put("map1", 8);
+
+ client().prepareIndex("test", "type1", "1").setSource("map", testMap).execute().actionGet();
+ updateResponse = client().prepareUpdate("test", "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("map", testMap3).endObject()).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ Map map1 = (Map) getResponse.getSourceAsMap().get("map");
+ assertThat(map1.size(), equalTo(3));
+ assertThat(map1.containsKey("map1"), equalTo(true));
+ assertThat(map1.containsKey("map3"), equalTo(true));
+ assertThat(map1.containsKey("commonkey"), equalTo(true));
+ Map map2 = (Map) map1.get("commonkey");
+ assertThat(map2.size(), equalTo(3));
+ assertThat(map2.containsKey("map1"), equalTo(true));
+ assertThat(map2.containsKey("map2"), equalTo(true));
+ assertThat(map2.containsKey("commonkey"), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testUpdateRequestWithBothScriptAndDoc() throws Exception {
+ createIndex();
+ ensureGreen();
+
+ try {
+ client().prepareUpdate("test", "type1", "1")
+ .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+ .setScript("ctx._source.field += 1")
+ .execute().actionGet();
+ fail("Should have thrown ActionRequestValidationException");
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors().size(), equalTo(1));
+ assertThat(e.validationErrors().get(0), containsString("can't provide both script and doc"));
+ assertThat(e.getMessage(), containsString("can't provide both script and doc"));
+ }
+ }
+
+ @Test
+ public void testUpdateRequestWithScriptAndShouldUpsertDoc() throws Exception {
+ createIndex();
+ ensureGreen();
+ try {
+ client().prepareUpdate("test", "type1", "1")
+ .setScript("ctx._source.field += 1")
+ .setDocAsUpsert(true)
+ .execute().actionGet();
+ fail("Should have thrown ActionRequestValidationException");
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors().size(), equalTo(1));
+ assertThat(e.validationErrors().get(0), containsString("doc must be specified if doc_as_upsert is enabled"));
+ assertThat(e.getMessage(), containsString("doc must be specified if doc_as_upsert is enabled"));
+ }
+ }
+
+ @Test
+ @Slow
+ public void testConcurrentUpdateWithRetryOnConflict() throws Exception {
+ final boolean useBulkApi = randomBoolean();
+ createIndex();
+ ensureGreen();
+
+ int numberOfThreads = between(2,5);
+ final CountDownLatch latch = new CountDownLatch(numberOfThreads);
+ final int numberOfUpdatesPerThread = between(1000, 10000);
+ final List<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
+ for (int i = 0; i < numberOfThreads; i++) {
+ Runnable r = new Runnable() {
+
+ @Override
+ public void run() {
+ try {
+ for (int i = 0; i < numberOfUpdatesPerThread; i++) {
+ if (useBulkApi) {
+ UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate("test", "type1", Integer.toString(i))
+ .setScript("ctx._source.field += 1")
+ .setRetryOnConflict(Integer.MAX_VALUE)
+ .setUpsert(jsonBuilder().startObject().field("field", 1).endObject());
+ client().prepareBulk().add(updateRequestBuilder).execute().actionGet();
+ } else {
+ client().prepareUpdate("test", "type1", Integer.toString(i)).setScript("ctx._source.field += 1")
+ .setRetryOnConflict(Integer.MAX_VALUE)
+ .setUpsert(jsonBuilder().startObject().field("field", 1).endObject())
+ .execute().actionGet();
+ }
+ }
+ } catch (Throwable e) {
+ failures.add(e);
+ } finally {
+ latch.countDown();
+ }
+ }
+
+ };
+ new Thread(r).start();
+ }
+ latch.await();
+ for (Throwable throwable : failures) {
+ logger.info("Captured failure on concurrent update:", throwable);
+ }
+ assertThat(failures.size(), equalTo(0));
+ for (int i = 0; i < numberOfUpdatesPerThread; i++) {
+ GetResponse response = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
+ assertThat(response.getId(), equalTo(Integer.toString(i)));
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getVersion(), equalTo((long) numberOfThreads));
+ assertThat((Integer) response.getSource().get("field"), equalTo(numberOfThreads));
+ }
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java b/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java
new file mode 100644
index 0000000..c6214fa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.validate;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.FilterBuilders;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matcher;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.QueryBuilders.queryString;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleValidateQueryTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleValidateQuery() throws Exception {
+
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("foo").field("type", "string").endObject()
+ .startObject("bar").field("type", "integer").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setSource("foo".getBytes(Charsets.UTF_8)).execute().actionGet().isValid(), equalTo(false));
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryString("_id:1")).execute().actionGet().isValid(), equalTo(true));
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryString("_i:d:1")).execute().actionGet().isValid(), equalTo(false));
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryString("foo:1")).execute().actionGet().isValid(), equalTo(true));
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryString("bar:hey")).execute().actionGet().isValid(), equalTo(false));
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryString("nonexistent:hello")).execute().actionGet().isValid(), equalTo(true));
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryString("foo:1 AND")).execute().actionGet().isValid(), equalTo(false));
+ }
+
+ @Test
+ public void explainValidateQuery() throws Exception {
+
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("foo").field("type", "string").endObject()
+ .startObject("bar").field("type", "integer").endObject()
+ .startObject("baz").field("type", "string").field("analyzer", "snowball").endObject()
+ .startObject("pin").startObject("properties").startObject("location").field("type", "geo_point").endObject().endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().indices().preparePutMapping("test").setType("child-type")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("child-type")
+ .startObject("_parent").field("type", "type1").endObject()
+ .startObject("properties")
+ .startObject("foo").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ ValidateQueryResponse response;
+ response = client().admin().indices().prepareValidateQuery("test")
+ .setSource("foo".getBytes(Charsets.UTF_8))
+ .setExplain(true)
+ .execute().actionGet();
+ assertThat(response.isValid(), equalTo(false));
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getError(), containsString("Failed to parse"));
+ assertThat(response.getQueryExplanation().get(0).getExplanation(), nullValue());
+
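+        // each check below asserts the rewritten Lucene query string that validate-query returns
+        // for a given QueryBuilder, via the assertExplanation helper at the end of this class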
+ assertExplanation(QueryBuilders.queryString("_id:1"), equalTo("ConstantScore(_uid:type1#1)"));
+
+ assertExplanation(QueryBuilders.idsQuery("type1").addIds("1").addIds("2"),
+ equalTo("ConstantScore(_uid:type1#1 _uid:type1#2)"));
+
+ assertExplanation(QueryBuilders.queryString("foo"), equalTo("_all:foo"));
+
+ assertExplanation(QueryBuilders.filteredQuery(
+ QueryBuilders.termQuery("foo", "1"),
+ FilterBuilders.orFilter(
+ FilterBuilders.termFilter("bar", "2"),
+ FilterBuilders.termFilter("baz", "3")
+ )
+ ), equalTo("filtered(foo:1)->cache(bar:[2 TO 2]) cache(baz:3)"));
+
+ assertExplanation(QueryBuilders.filteredQuery(
+ QueryBuilders.termQuery("foo", "1"),
+ FilterBuilders.orFilter(
+ FilterBuilders.termFilter("bar", "2")
+ )
+ ), equalTo("filtered(foo:1)->cache(bar:[2 TO 2])"));
+
+ assertExplanation(QueryBuilders.filteredQuery(
+ QueryBuilders.matchAllQuery(),
+ FilterBuilders.geoPolygonFilter("pin.location")
+ .addPoint(40, -70)
+ .addPoint(30, -80)
+ .addPoint(20, -90)
+ .addPoint(40, -70) // closing polygon
+ ), equalTo("ConstantScore(GeoPolygonFilter(pin.location, [[40.0, -70.0], [30.0, -80.0], [20.0, -90.0], [40.0, -70.0]]))"));
+
+ assertExplanation(QueryBuilders.constantScoreQuery(FilterBuilders.geoBoundingBoxFilter("pin.location")
+ .topLeft(40, -80)
+ .bottomRight(20, -70)
+ ), equalTo("ConstantScore(GeoBoundingBoxFilter(pin.location, [40.0, -80.0], [20.0, -70.0]))"));
+
+ assertExplanation(QueryBuilders.constantScoreQuery(FilterBuilders.geoDistanceFilter("pin.location")
+ .lat(10).lon(20).distance(15, DistanceUnit.DEFAULT).geoDistance(GeoDistance.PLANE)
+ ), equalTo("ConstantScore(GeoDistanceFilter(pin.location, PLANE, 15.0, 10.0, 20.0))"));
+
+ assertExplanation(QueryBuilders.constantScoreQuery(FilterBuilders.geoDistanceRangeFilter("pin.location")
+ .lat(10).lon(20).from("15m").to("25m").geoDistance(GeoDistance.PLANE)
+ ), equalTo("ConstantScore(GeoDistanceRangeFilter(pin.location, PLANE, [15.0 - 25.0], 10.0, 20.0))"));
+
+ assertExplanation(QueryBuilders.constantScoreQuery(FilterBuilders.geoDistanceRangeFilter("pin.location")
+ .lat(10).lon(20).from("15miles").to("25miles").geoDistance(GeoDistance.PLANE)
+ ), equalTo("ConstantScore(GeoDistanceRangeFilter(pin.location, PLANE, [" + DistanceUnit.DEFAULT.convert(15.0, DistanceUnit.MILES) + " - " + DistanceUnit.DEFAULT.convert(25.0, DistanceUnit.MILES) + "], 10.0, 20.0))"));
+
+ assertExplanation(QueryBuilders.filteredQuery(
+ QueryBuilders.termQuery("foo", "1"),
+ FilterBuilders.andFilter(
+ FilterBuilders.termFilter("bar", "2"),
+ FilterBuilders.termFilter("baz", "3")
+ )
+ ), equalTo("filtered(foo:1)->+cache(bar:[2 TO 2]) +cache(baz:3)"));
+
+ assertExplanation(QueryBuilders.constantScoreQuery(FilterBuilders.termsFilter("foo", "1", "2", "3")),
+ equalTo("ConstantScore(cache(foo:1 foo:2 foo:3))"));
+
+ assertExplanation(QueryBuilders.constantScoreQuery(FilterBuilders.notFilter(FilterBuilders.termFilter("foo", "bar"))),
+ equalTo("ConstantScore(NotFilter(cache(foo:bar)))"));
+
+ assertExplanation(QueryBuilders.filteredQuery(
+ QueryBuilders.termQuery("foo", "1"),
+ FilterBuilders.hasChildFilter(
+ "child-type",
+ QueryBuilders.matchQuery("foo", "1")
+ )
+ ), equalTo("filtered(foo:1)->CustomQueryWrappingFilter(child_filter[child-type/type1](filtered(foo:1)->cache(_type:child-type)))"));
+
+ assertExplanation(QueryBuilders.filteredQuery(
+ QueryBuilders.termQuery("foo", "1"),
+ FilterBuilders.scriptFilter("true")
+ ), equalTo("filtered(foo:1)->ScriptFilter(true)"));
+
+ }
+
+ @Test
+ public void explainValidateQueryTwoNodes() throws IOException {
+
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("foo").field("type", "string").endObject()
+ .startObject("bar").field("type", "integer").endObject()
+ .startObject("baz").field("type", "string").field("analyzer", "snowball").endObject()
+ .startObject("pin").startObject("properties").startObject("location").field("type", "geo_point").endObject().endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ for (Client client : cluster()) {
+ ValidateQueryResponse response = client.admin().indices().prepareValidateQuery("test")
+ .setSource("foo".getBytes(Charsets.UTF_8))
+ .setExplain(true)
+ .execute().actionGet();
+ assertThat(response.isValid(), equalTo(false));
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getError(), containsString("Failed to parse"));
+ assertThat(response.getQueryExplanation().get(0).getExplanation(), nullValue());
+
+ }
+
+ for (Client client : cluster()) {
+ ValidateQueryResponse response = client.admin().indices().prepareValidateQuery("test")
+ .setQuery(QueryBuilders.queryString("foo"))
+ .setExplain(true)
+ .execute().actionGet();
+ assertThat(response.isValid(), equalTo(true));
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getExplanation(), equalTo("_all:foo"));
+ assertThat(response.getQueryExplanation().get(0).getError(), nullValue());
+ }
+ }
+
+ @Test //https://github.com/elasticsearch/elasticsearch/issues/3629
+ public void explainDateRangeInQueryString() {
+ client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).get();
+
+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));
+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1));
+
+ client().prepareIndex("test", "type", "1").setSource("past", aMonthAgo, "future", aMonthFromNow).get();
+
+ refresh();
+
+ ValidateQueryResponse response = client().admin().indices().prepareValidateQuery()
+ .setQuery(queryString("past:[now-2M/d TO now/d]")).setExplain(true).get();
+
+ assertNoFailures(response);
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getError(), nullValue());
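+        // "now-2M/d" rounds down to the start of the day two months ago; an upper bound of "now/d"
+        // is inclusive of the whole current day, which is why the expected end of the range is the
+        // start of tomorrow (plusDays(1) at start of day)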
+ DateTime twoMonthsAgo = new DateTime(DateTimeZone.UTC).minusMonths(2).withTimeAtStartOfDay();
+ DateTime now = new DateTime(DateTimeZone.UTC).plusDays(1).withTimeAtStartOfDay();
+ assertThat(response.getQueryExplanation().get(0).getExplanation(),
+ equalTo("past:[" + twoMonthsAgo.getMillis() + " TO " + now.getMillis() + "]"));
+ assertThat(response.isValid(), equalTo(true));
+ }
+
+ private void assertExplanation(QueryBuilder queryBuilder, Matcher<String> matcher) {
+ ValidateQueryResponse response = client().admin().indices().prepareValidateQuery("test")
+ .setTypes("type1")
+ .setQuery(queryBuilder)
+ .setExplain(true)
+ .execute().actionGet();
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getError(), nullValue());
+ assertThat(response.getQueryExplanation().get(0).getExplanation(), matcher);
+ assertThat(response.isValid(), equalTo(true));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationTests.java b/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationTests.java
new file mode 100644
index 0000000..75a4384
--- /dev/null
+++ b/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.versioning;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class ConcurrentDocumentOperationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void concurrentOperationOnSameDocTest() throws Exception {
+
+ logger.info("--> create an index with 1 shard and max replicas based on nodes");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", cluster().size()-1))
+ .execute().actionGet();
+
+ logger.info("execute concurrent updates on the same doc");
+ int numberOfUpdates = 100;
+ final AtomicReference<Throwable> failure = new AtomicReference<Throwable>();
+ final CountDownLatch latch = new CountDownLatch(numberOfUpdates);
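+        // fire all index requests asynchronously; both listener callbacks count the latch down, so
+        // await() below terminates even when a request fails, and the failure is captured instead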
+ for (int i = 0; i < numberOfUpdates; i++) {
+ client().prepareIndex("test", "type1", "1").setSource("field1", i).execute(new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse response) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ e.printStackTrace();
+ failure.set(e);
+ latch.countDown();
+ }
+ });
+ }
+
+ latch.await();
+
+ assertThat(failure.get(), nullValue());
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("done indexing, check all have the same field value");
+ Map masterSource = client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap();
+ for (int i = 0; i < (cluster().size() * 5); i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap(), equalTo(masterSource));
+ }
+ }
+}
diff --git a/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java b/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java
new file mode 100644
index 0000000..95cce96
--- /dev/null
+++ b/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java
@@ -0,0 +1,259 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.versioning;
+
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.engine.DocumentAlreadyExistsException;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleVersioningTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testExternalVersioningInitialDelete() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ // Note - external versioning doesn't throw version conflicts on deletes of non-existent records. This is different from internal versioning.
+
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(17).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(false));
+
+ // this should conflict with the delete operation above, which recorded that the document was deleted at version 17.
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class
+ );
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(18).
+ setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(18L));
+ }
+
+ @Test
+ public void testExternalVersioning() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(12).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(12l));
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(14).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(14l));
+
+ assertThrows(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class);
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareGet("test", "type", "1").execute().actionGet().getVersion(), equalTo(14l));
+ }
+
+ // deleting with a lower version fails.
+ assertThrows(
+ client().prepareDelete("test", "type", "1").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class);
+
+ // Delete with a higher version deletes all versions up to the given one.
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(17).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+ assertThat(deleteResponse.getVersion(), equalTo(17l));
+
+ // Deleting with a lower version keeps on failing after a delete.
+ assertThrows(
+ client().prepareDelete("test", "type", "1").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class);
+
+
+ // But delete with a higher version is OK.
+ deleteResponse = client().prepareDelete("test", "type", "1").setVersion(18).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(false));
+ assertThat(deleteResponse.getVersion(), equalTo(18l));
+
+
+ // TODO: This behavior breaks the REST API returning HTTP status 201; the good news is that this is only the case until the deletes GC kicks in.
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(19).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(19l));
+
+
+ deleteResponse = client().prepareDelete("test", "type", "1").setVersion(20).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+ assertThat(deleteResponse.getVersion(), equalTo(20l));
+
+ // Make sure that the next delete will be GC'd. Note we set it via the index settings so it will be cleaned up.
+ HashMap<String,Object> newSettings = new HashMap<String, Object>();
+ newSettings.put("index.gc_deletes",-1);
+ client().admin().indices().prepareUpdateSettings("test").setSettings(newSettings).execute().actionGet();
+
+ Thread.sleep(300); // GC works off an estimated, sampled time. Give it a chance...
+
+ // And now the previous version resolves to -1 (not found), so indexing with the already-used version 20 succeeds
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(20).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(20l));
+ }
+
+ @Test
+ public void testInternalVersioningInitialDelete() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
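+ // With internal versioning, a versioned delete of a non-existent document conflicts (unlike external versioning above).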
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(17).execute(),
+ VersionConflictEngineException.class);
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1")
+ .setCreate(true).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(1l));
+ }
+
+
+ @Test
+ public void testInternalVersioning() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(1l));
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").setVersion(1).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(2l));
+
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(2).execute(),
+ DocumentAlreadyExistsException.class);
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(2).execute(),
+ DocumentAlreadyExistsException.class);
+
+
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareGet("test", "type", "1").execute().actionGet().getVersion(), equalTo(2l));
+ }
+
+ // search with versioning
+ for (int i = 0; i < 10; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setVersion(true).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).version(), equalTo(2l));
+ }
+
+ // search without versioning
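+ // (when versions are not requested, hits report Versions.NOT_FOUND instead of a real version)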
+ for (int i = 0; i < 10; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).version(), equalTo(Versions.NOT_FOUND));
+ }
+
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(2).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+ assertThat(deleteResponse.getVersion(), equalTo(3l));
+
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(2).execute(), VersionConflictEngineException.class);
+
+
+ // This is intricate - the document was already deleted, but a delete with the right version is still accepted.
+ // We add another one and the version is thus incremented.
+ deleteResponse = client().prepareDelete("test", "type", "1").setVersion(3).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(false));
+ assertThat(deleteResponse.getVersion(), equalTo(4l));
+ }
+
+ @Test
+ public void testSimpleVersioningWithFlush() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(1l));
+
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").setVersion(1).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(2l));
+
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ assertThrows(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+ assertThrows(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareGet("test", "type", "1").execute().actionGet().getVersion(), equalTo(2l));
+ }
+
+ for (int i = 0; i < 10; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setVersion(true).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).version(), equalTo(2l));
+ }
+ }
+
+ @Test
+ public void testVersioningWithBulk() {
+ createIndex("test");
+ ensureGreen();
+
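+ // A bulk index item should report the same version bookkeeping as a standalone index request.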
+ BulkResponse bulkResponse = client().prepareBulk().add(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1")).execute().actionGet();
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(1));
+ IndexResponse indexResponse = bulkResponse.getItems()[0].getResponse();
+ assertThat(indexResponse.getVersion(), equalTo(1l));
+ }
+}
diff --git a/src/test/java/org/elasticsearch/watcher/FileWatcherTest.java b/src/test/java/org/elasticsearch/watcher/FileWatcherTest.java
new file mode 100644
index 0000000..ce40171
--- /dev/null
+++ b/src/test/java/org/elasticsearch/watcher/FileWatcherTest.java
@@ -0,0 +1,386 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.watcher;
+
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.io.Files.*;
+import static org.elasticsearch.common.io.FileSystemUtils.deleteRecursively;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class FileWatcherTest extends ElasticsearchTestCase {
+
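+ // Records every FileChangesListener callback as an "<event>: <relative path>" string so tests can assert the exact notification order.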
+ private class RecordingChangeListener extends FileChangesListener {
+
+ private File rootDir;
+
+ private RecordingChangeListener(File rootDir) {
+ this.rootDir = rootDir;
+ }
+
+ private String getRelativeFileName(File file) {
+ return rootDir.toURI().relativize(file.toURI()).getPath();
+ }
+
+ private List<String> notifications = newArrayList();
+
+ @Override
+ public void onFileInit(File file) {
+ notifications.add("onFileInit: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onDirectoryInit(File file) {
+ notifications.add("onDirectoryInit: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onFileCreated(File file) {
+ notifications.add("onFileCreated: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onFileDeleted(File file) {
+ notifications.add("onFileDeleted: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onFileChanged(File file) {
+ notifications.add("onFileChanged: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onDirectoryCreated(File file) {
+ notifications.add("onDirectoryCreated: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onDirectoryDeleted(File file) {
+ notifications.add("onDirectoryDeleted: " + getRelativeFileName(file));
+ }
+
+ public List<String> notifications() {
+ return notifications;
+ }
+ }
+
+ @Test
+ public void testSimpleFileOperations() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testFile = new File(tempDir, "test.txt");
+ touch(testFile);
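+ // FileWatcher is poll-based: init() records the current state (firing the on*Init callbacks) and each checkAndNotify() reports changes since the previous check.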
+ FileWatcher fileWatcher = new FileWatcher(testFile);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), contains(equalTo("onFileInit: test.txt")));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ append("Test", testFile, Charset.defaultCharset());
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(equalTo("onFileChanged: test.txt")));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ testFile.delete();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(equalTo("onFileDeleted: test.txt")));
+
+ }
+
+ @Test
+ public void testSimpleDirectoryOperations() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testDir = new File(tempDir, "test-dir");
+ testDir.mkdir();
+ touch(new File(testDir, "test.txt"));
+ touch(new File(testDir, "test0.txt"));
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryInit: test-dir/"),
+ equalTo("onFileInit: test-dir/test.txt"),
+ equalTo("onFileInit: test-dir/test0.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ for (int i = 0; i < 4; i++) {
+ touch(new File(testDir, "test" + i + ".txt"));
+ }
+ // Make sure that the first file is modified (so we see onFileChanged alongside the creations)
+ append("Test", new File(testDir, "test0.txt"), Charset.defaultCharset());
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileChanged: test-dir/test0.txt"),
+ equalTo("onFileCreated: test-dir/test1.txt"),
+ equalTo("onFileCreated: test-dir/test2.txt"),
+ equalTo("onFileCreated: test-dir/test3.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ new File(testDir, "test1.txt").delete();
+ new File(testDir, "test2.txt").delete();
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test1.txt"),
+ equalTo("onFileDeleted: test-dir/test2.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ new File(testDir, "test0.txt").delete();
+ touch(new File(testDir, "test2.txt"));
+ touch(new File(testDir, "test4.txt"));
+ fileWatcher.checkAndNotify();
+
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test0.txt"),
+ equalTo("onFileCreated: test-dir/test2.txt"),
+ equalTo("onFileCreated: test-dir/test4.txt")
+ ));
+
+
+ changes.notifications().clear();
+
+ new File(testDir, "test3.txt").delete();
+ new File(testDir, "test4.txt").delete();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test3.txt"),
+ equalTo("onFileDeleted: test-dir/test4.txt")
+ ));
+
+
+ changes.notifications().clear();
+ deleteRecursively(testDir);
+ fileWatcher.checkAndNotify();
+
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test.txt"),
+ equalTo("onFileDeleted: test-dir/test2.txt"),
+ equalTo("onDirectoryDeleted: test-dir")
+ ));
+
+ }
+
+ @Test
+ public void testNestedDirectoryOperations() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testDir = new File(tempDir, "test-dir");
+ testDir.mkdir();
+ touch(new File(testDir, "test.txt"));
+ new File(testDir, "sub-dir").mkdir();
+ touch(new File(testDir, "sub-dir/test0.txt"));
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryInit: test-dir/"),
+ equalTo("onDirectoryInit: test-dir/sub-dir/"),
+ equalTo("onFileInit: test-dir/sub-dir/test0.txt"),
+ equalTo("onFileInit: test-dir/test.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ // Create new file in subdirectory
+ touch(new File(testDir, "sub-dir/test1.txt"));
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileCreated: test-dir/sub-dir/test1.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ // Create a new directory tree (with a nested subdirectory) under the watched directory
+ new File(testDir, "first-level").mkdir();
+ touch(new File(testDir, "first-level/file1.txt"));
+ new File(testDir, "first-level/second-level").mkdir();
+ touch(new File(testDir, "first-level/second-level/file2.txt"));
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryCreated: test-dir/first-level/"),
+ equalTo("onFileCreated: test-dir/first-level/file1.txt"),
+ equalTo("onDirectoryCreated: test-dir/first-level/second-level/"),
+ equalTo("onFileCreated: test-dir/first-level/second-level/file2.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ // Delete a directory and check notifications for the files and subdirectories it contained
+ deleteRecursively(new File(testDir, "first-level"));
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/first-level/file1.txt"),
+ equalTo("onFileDeleted: test-dir/first-level/second-level/file2.txt"),
+ equalTo("onDirectoryDeleted: test-dir/first-level/second-level"),
+ equalTo("onDirectoryDeleted: test-dir/first-level")
+ ));
+ }
+
+ @Test
+ public void testFileReplacingDirectory() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testDir = new File(tempDir, "test-dir");
+ testDir.mkdir();
+ File subDir = new File(testDir, "sub-dir");
+ subDir.mkdir();
+ touch(new File(subDir, "test0.txt"));
+ touch(new File(subDir, "test1.txt"));
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryInit: test-dir/"),
+ equalTo("onDirectoryInit: test-dir/sub-dir/"),
+ equalTo("onFileInit: test-dir/sub-dir/test0.txt"),
+ equalTo("onFileInit: test-dir/sub-dir/test1.txt")
+ ));
+
+ changes.notifications().clear();
+
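+ // Replace the watched sub-directory with a regular file of the same name.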
+ deleteRecursively(subDir);
+ touch(subDir);
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/sub-dir/test0.txt"),
+ equalTo("onFileDeleted: test-dir/sub-dir/test1.txt"),
+ equalTo("onDirectoryDeleted: test-dir/sub-dir"),
+ equalTo("onFileCreated: test-dir/sub-dir")
+ ));
+
+ changes.notifications().clear();
+
+ subDir.delete();
+ subDir.mkdir();
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/sub-dir/"),
+ equalTo("onDirectoryCreated: test-dir/sub-dir/")
+ ));
+ }
+
+ @Test
+ public void testEmptyDirectory() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testDir = new File(tempDir, "test-dir");
+ testDir.mkdir();
+ touch(new File(testDir, "test0.txt"));
+ touch(new File(testDir, "test1.txt"));
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ changes.notifications().clear();
+
+ new File(testDir, "test0.txt").delete();
+ new File(testDir, "test1.txt").delete();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test0.txt"),
+ equalTo("onFileDeleted: test-dir/test1.txt")
+ ));
+ }
+
+ @Test
+ public void testNoDirectoryOnInit() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testDir = new File(tempDir, "test-dir");
+
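+ // The watched directory does not exist yet, so init() reports nothing; the creation is picked up by the next checkAndNotify().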
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), hasSize(0));
+ changes.notifications().clear();
+
+ testDir.mkdir();
+ touch(new File(testDir, "test0.txt"));
+ touch(new File(testDir, "test1.txt"));
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryCreated: test-dir/"),
+ equalTo("onFileCreated: test-dir/test0.txt"),
+ equalTo("onFileCreated: test-dir/test1.txt")
+ ));
+ }
+
+ @Test
+ public void testNoFileOnInit() throws IOException {
+ File tempDir = newTempDir(LifecycleScope.TEST);
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ File testFile = new File(tempDir, "testfile.txt");
+
+ FileWatcher fileWatcher = new FileWatcher(testFile);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), hasSize(0));
+ changes.notifications().clear();
+
+ touch(testFile);
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileCreated: testfile.txt")
+ ));
+ }
+
+}
\ No newline at end of file